`_
-
-To turn on coverage tracing, use the following code::
-
- cherrypy.engine.subscribe('start', covercp.start)
-
-DO NOT subscribe anything on the 'start_thread' channel, as previously
-recommended. Calling start once in the main thread should be sufficient
-to start coverage on all threads. Calling start again in each thread
-effectively clears any coverage data gathered up to that point.
-
-Run your code, then use the ``covercp.serve()`` function to browse the
-results in a web browser. If you run this module from the command line,
-it will call ``serve()`` for you.
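-
-For example, a complete session might look like this (a minimal sketch;
-``myapp.Root`` is a placeholder for your own application)::
-
- import cherrypy
- from cherrypy.lib import covercp
-
- # Start coverage once, in the main thread only.
- cherrypy.engine.subscribe('start', covercp.start)
-
- import myapp
- cherrypy.quickstart(myapp.Root())
-
- # After the server stops, browse the results (default port 8080).
- covercp.serve()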
-"""
-
-import re
-import sys
-import cgi
-from cherrypy._cpcompat import quote_plus
-import os, os.path
-localFile = os.path.join(os.path.dirname(__file__), "coverage.cache")
-
-the_coverage = None
-try:
- from coverage import coverage
- the_coverage = coverage(data_file=localFile)
- def start():
- the_coverage.start()
-except ImportError:
- # Setting the_coverage to None will raise errors
- # that need to be trapped downstream.
- the_coverage = None
-
- import warnings
- warnings.warn("No code coverage will be performed; coverage.py could not be imported.")
-
- def start():
- pass
-start.priority = 20
-
-TEMPLATE_MENU = """
-
- CherryPy Coverage Menu
-
-
-
-CherryPy Coverage
"""
-
-TEMPLATE_FORM = """
-
-
-
"""
-
-TEMPLATE_FRAMESET = """
-CherryPy coverage data
-
-
-"""
-
-TEMPLATE_COVERAGE = """
-
- Coverage for %(name)s
-
-
-
-%(name)s
-%(fullpath)s
-Coverage: %(pc)s%%
"""
-
-TEMPLATE_LOC_COVERED = """
- %s |
- %s |
-
\n"""
-TEMPLATE_LOC_NOT_COVERED = """
- %s |
- %s |
-
\n"""
-TEMPLATE_LOC_EXCLUDED = """
- %s |
- %s |
-
\n"""
-
-TEMPLATE_ITEM = "%s%s%s\n"
-
-def _percent(statements, missing):
- s = len(statements)
- e = s - len(missing)
- if s > 0:
- return int(round(100.0 * e / s))
- return 0
-
-def _show_branch(root, base, path, pct=0, showpct=False, exclude="",
- coverage=the_coverage):
-
- # Show the directory name and any of our children
- dirs = [k for k, v in root.items() if v]
- dirs.sort()
- for name in dirs:
- newpath = os.path.join(path, name)
-
- if newpath.lower().startswith(base):
- relpath = newpath[len(base):]
- yield "| " * relpath.count(os.sep)
- yield "%s\n" % \
- (newpath, quote_plus(exclude), name)
-
- for chunk in _show_branch(root[name], base, newpath, pct, showpct, exclude, coverage=coverage):
- yield chunk
-
- # Now list the files
- if path.lower().startswith(base):
- relpath = path[len(base):]
- files = [k for k, v in root.items() if not v]
- files.sort()
- for name in files:
- newpath = os.path.join(path, name)
-
- pc_str = ""
- if showpct:
- try:
- _, statements, _, missing, _ = coverage.analysis2(newpath)
- except:
- # Yes, we really want to pass on all errors.
- pass
- else:
- pc = _percent(statements, missing)
- pc_str = ("%3d%% " % pc).replace(' ',' ')
- if pc < float(pct) or pc == -1:
- pc_str = "%s" % pc_str
- else:
- pc_str = "%s" % pc_str
-
- yield TEMPLATE_ITEM % ("| " * (relpath.count(os.sep) + 1),
- pc_str, newpath, name)
-
-def _skip_file(path, exclude):
- if exclude:
- return bool(re.search(exclude, path))
-
-def _graft(path, tree):
- d = tree
-
- p = path
- atoms = []
- while True:
- p, tail = os.path.split(p)
- if not tail:
- break
- atoms.append(tail)
- atoms.append(p)
- if p != "/":
- atoms.append("/")
-
- atoms.reverse()
- for node in atoms:
- if node:
- d = d.setdefault(node, {})
-
-def get_tree(base, exclude, coverage=the_coverage):
- """Return covered module names as a nested dict."""
- tree = {}
- runs = coverage.data.executed_files()
- for path in runs:
- if not _skip_file(path, exclude) and not os.path.isdir(path):
- _graft(path, tree)
- return tree
-
-class CoverStats(object):
-
- def __init__(self, coverage, root=None):
- self.coverage = coverage
- if root is None:
- # Guess initial depth. Files outside this path will not be
- # reachable from the web interface.
- import cherrypy
- root = os.path.dirname(cherrypy.__file__)
- self.root = root
-
- def index(self):
- return TEMPLATE_FRAMESET % self.root.lower()
- index.exposed = True
-
- def menu(self, base="/", pct="50", showpct="",
- exclude=r'python\d\.\d|test|tut\d|tutorial'):
-
- # The coverage module uses all-lower-case names.
- base = base.lower().rstrip(os.sep)
-
- yield TEMPLATE_MENU
- yield TEMPLATE_FORM % locals()
-
- # Start by showing links for parent paths
- yield ""
- path = ""
- atoms = base.split(os.sep)
- atoms.pop()
- for atom in atoms:
- path += atom + os.sep
- yield ("
%s %s"
- % (path, quote_plus(exclude), atom, os.sep))
- yield "
"
-
- yield ""
-
- # Then display the tree
- tree = get_tree(base, exclude, self.coverage)
- if not tree:
- yield "
No modules covered.
"
- else:
- for chunk in _show_branch(tree, base, "/", pct,
- showpct=='checked', exclude, coverage=self.coverage):
- yield chunk
-
- yield "
"
- yield ""
- menu.exposed = True
-
- def annotated_file(self, filename, statements, excluded, missing):
- source = open(filename, 'r')
- buffer = []
- for lineno, line in enumerate(source.readlines()):
- lineno += 1
- line = line.strip("\n\r")
- empty_the_buffer = True
- if lineno in excluded:
- template = TEMPLATE_LOC_EXCLUDED
- elif lineno in missing:
- template = TEMPLATE_LOC_NOT_COVERED
- elif lineno in statements:
- template = TEMPLATE_LOC_COVERED
- else:
- empty_the_buffer = False
- buffer.append((lineno, line))
- if empty_the_buffer:
- for lno, pastline in buffer:
- yield template % (lno, cgi.escape(pastline))
- buffer = []
- yield template % (lineno, cgi.escape(line))
-
- def report(self, name):
- filename, statements, excluded, missing, _ = self.coverage.analysis2(name)
- pc = _percent(statements, missing)
- yield TEMPLATE_COVERAGE % dict(name=os.path.basename(name),
- fullpath=name,
- pc=pc)
- yield '<table>\n'
- for line in self.annotated_file(filename, statements, excluded,
- missing):
- yield line
- yield '</table>'
- yield '</body>'
- yield '</html>'
- report.exposed = True
-
-
-def serve(path=localFile, port=8080, root=None):
- if the_coverage is None:
- raise ImportError("The coverage module could not be imported.")
- from coverage import coverage
- cov = coverage(data_file = path)
- cov.load()
-
- import cherrypy
- cherrypy.config.update({'server.socket_port': int(port),
- 'server.thread_pool': 10,
- 'environment': "production",
- })
- cherrypy.quickstart(CoverStats(cov, root))
-
-if __name__ == "__main__":
- serve(*tuple(sys.argv[1:]))
-
diff --git a/python-packages/cherrypy/lib/cpstats.py b/python-packages/cherrypy/lib/cpstats.py
deleted file mode 100644
index 9be947f2b7..0000000000
--- a/python-packages/cherrypy/lib/cpstats.py
+++ /dev/null
@@ -1,662 +0,0 @@
-"""CPStats, a package for collecting and reporting on program statistics.
-
-Overview
-========
-
-Statistics about program operation are an invaluable monitoring and debugging
-tool. Unfortunately, the gathering and reporting of these critical values is
-usually ad-hoc. This package aims to add a centralized place for gathering
-statistical performance data, a structure for recording that data which
-provides for extrapolation of that data into more useful information,
-and a method of serving that data to both human investigators and
-monitoring software. Let's examine each of those in more detail.
-
-Data Gathering
---------------
-
-Just as Python's `logging` module provides a common importable for gathering
-and sending messages, performance statistics would benefit from a similar
-common mechanism, and one that does *not* require each package which wishes
-to collect stats to import a third-party module. Therefore, we choose to
-re-use the `logging` module by adding a `statistics` object to it.
-
-That `logging.statistics` object is a nested dict. It is not a custom class,
-because that would 1) require libraries and applications to import a third-
-party module in order to participate, 2) inhibit innovation in extrapolation
-approaches and in reporting tools, and 3) be slow. There are, however, some
-specifications regarding the structure of the dict.
-
- {
- +----"SQLAlchemy": {
- | "Inserts": 4389745,
- | "Inserts per Second":
- | lambda s: s["Inserts"] / (time() - s["Start"]),
- | C +---"Table Statistics": {
- | o | "widgets": {-----------+
- N | l | "Rows": 1.3M, | Record
- a | l | "Inserts": 400, |
- m | e | },---------------------+
- e | c | "froobles": {
- s | t | "Rows": 7845,
- p | i | "Inserts": 0,
- a | o | },
- c | n +---},
- e | "Slow Queries":
- | [{"Query": "SELECT * FROM widgets;",
- | "Processing Time": 47.840923343,
- | },
- | ],
- +----},
- }
-
-The `logging.statistics` dict has four levels. The topmost level is nothing
-more than a set of names to introduce modularity, usually along the lines of
-package names. If the SQLAlchemy project wanted to participate, for example,
-it might populate the item `logging.statistics['SQLAlchemy']`, whose value
-would be a second-layer dict we call a "namespace". Namespaces help multiple
-packages to avoid collisions over key names, and make reports easier to read,
-to boot. The maintainers of SQLAlchemy should feel free to use more than one
-namespace if needed (such as 'SQLAlchemy ORM'). Note that there are no case
-or other syntax constraints on the namespace names; they should be chosen
-to be maximally readable by humans (neither too short nor too long).
-
-Each namespace, then, is a dict of named statistical values, such as
-'Requests/sec' or 'Uptime'. You should choose names which will look
-good on a report: spaces and capitalization are just fine.
-
-In addition to scalars, values in a namespace MAY be a (third-layer)
-dict, or a list, called a "collection". For example, the CherryPy StatsTool
-keeps track of what each request is doing (or has most recently done)
-in a 'Requests' collection, where each key is a thread ID; each
-value in the subdict MUST be a fourth dict (whew!) of statistical data about
-each thread. We call each subdict in the collection a "record". Similarly,
-the StatsTool also keeps a list of slow queries, where each record contains
-data about each slow query, in order.
-
-Values in a namespace or record may also be functions, which brings us to:
-
-Extrapolation
--------------
-
-The collection of statistical data needs to be fast, as close to unnoticeable
-as possible to the host program. That requires us to minimize I/O, for example,
-but in Python it also means we need to minimize function calls. So when you
-are designing your namespace and record values, try to insert the most basic
-scalar values you already have on hand.
-
-When it comes time to report on the gathered data, however, we usually have
-much more freedom in what we can calculate. Therefore, whenever reporting
-tools (like the provided StatsPage CherryPy class) fetch the contents of
-`logging.statistics` for reporting, they first call `extrapolate_statistics`
-(passing the whole `statistics` dict as the only argument). This makes a
-deep copy of the statistics dict so that the reporting tool can both iterate
-over it and even change it without harming the original. But it also expands
-any functions in the dict by calling them. For example, you might have a
-'Current Time' entry in the namespace with the value "lambda scope: time.time()".
-The "scope" parameter is the current namespace dict (or record, if we're
-currently expanding one of those instead), allowing you access to existing
-static entries. If you're truly evil, you can even modify more than one entry
-at a time.
-
-However, don't try to calculate an entry and then use its value in further
-extrapolations; the order in which the functions are called is not guaranteed.
-This can lead to a certain amount of duplicated work (or a redesign of your
-schema), but that's better than complicating the spec.
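-
-For example, a sketch of a namespace mixing static and extrapolated
-entries (the 'Example' namespace name is made up)::
-
- import logging, time
- if not hasattr(logging, 'statistics'): logging.statistics = {}
- ns = logging.statistics.setdefault('Example', {})
- ns['Start Time'] = time.time()
- # Each function receives its enclosing namespace (or record) as 'scope'.
- ns['Uptime'] = lambda scope: time.time() - scope['Start Time']
- # Reporting tools then replace each function with its return value:
- report = extrapolate_statistics(logging.statistics)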
-
-After the whole thing has been extrapolated, it's time for:
-
-Reporting
----------
-
-The StatsPage class grabs the `logging.statistics` dict, extrapolates it all,
-and then transforms it to HTML for easy viewing. Each namespace gets its own
-header and attribute table, plus an extra table for each collection. This is
-NOT part of the statistics specification; other tools can format how they like.
-
-You can control which columns are output and how they are formatted by updating
-StatsPage.formatting, which is a dict that mirrors the keys and nesting of
-`logging.statistics`. The difference is that, instead of data values, it has
-formatting values. Use None for a given key to indicate to the StatsPage that a
-given column should not be output. Use a string with formatting (such as '%.3f')
-to interpolate the value(s), or use a callable (such as lambda v: v.isoformat())
-for more advanced formatting. Any entry which is not mentioned in the formatting
-dict is output unchanged.
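-
-For example, a sketch of overriding the formatting for one namespace
-(the 'My Stuff' namespace and its keys are placeholders)::
-
- import time
- from cherrypy.lib import cpstats
- cpstats.StatsPage.formatting['My Stuff'] = {
- 'Start Time': lambda v: time.ctime(v), # callable formatter
- 'Events/Second': '%.3f', # printf-style format string
- 'Internal Detail': None, # omit this column entirely
- }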
-
-Monitoring
-----------
-
-Although the HTML output takes pains to assign unique id's to each <table> with
-statistical data, you're probably better off fetching /cpstats/data, which
-outputs the whole (extrapolated) `logging.statistics` dict in JSON format.
-That is probably easier to parse, and doesn't have any formatting controls,
-so you get the "original" data in a consistently-serialized format.
-Note: there's no treatment yet for datetime objects. Try time.time() instead
-for now if you can. Nagios will probably thank you.
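-
-For example, a monitoring script might poll it like this (a sketch; host,
-port, and mount point depend on your deployment)::
-
- import json, urllib2
- url = 'http://localhost:8080/cpstats/data'
- stats = json.load(urllib2.urlopen(url))
- print stats['CherryPy Applications']['Requests/Second']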
-
-Turning Collection Off
-----------------------
-
-It is recommended each namespace have an "Enabled" item which, if False,
-stops collection (but not reporting) of statistical data. Applications
-SHOULD provide controls to pause and resume collection by setting these
-entries to False or True, if present.
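-
-For example, to pause collection for one namespace by hand (a sketch;
-the StatsPage 'pause' handler below does the same thing)::
-
- import logging
- logging.statistics.get('CherryPy Applications', {})['Enabled'] = False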
-
-
-Usage
-=====
-
-To collect statistics on CherryPy applications:
-
- from cherrypy.lib import cpstats
- appconfig['/']['tools.cpstats.on'] = True
-
-To collect statistics on your own code:
-
- import logging
- # Initialize the repository
- if not hasattr(logging, 'statistics'): logging.statistics = {}
- # Initialize my namespace
- mystats = logging.statistics.setdefault('My Stuff', {})
- # Initialize my namespace's scalars and collections
- mystats.update({
- 'Enabled': True,
- 'Start Time': time.time(),
- 'Important Events': 0,
- 'Events/Second': lambda s: (
- (s['Important Events'] / (time.time() - s['Start Time']))),
- })
- ...
- for event in events:
- ...
- # Collect stats
- if mystats.get('Enabled', False):
- mystats['Important Events'] += 1
-
-To report statistics:
-
- root.cpstats = cpstats.StatsPage()
-
-To format statistics reports:
-
- See 'Reporting', above.
-
-"""
-
-# -------------------------------- Statistics -------------------------------- #
-
-import logging
-if not hasattr(logging, 'statistics'): logging.statistics = {}
-
-def extrapolate_statistics(scope):
- """Return an extrapolated copy of the given scope."""
- c = {}
- for k, v in list(scope.items()):
- if isinstance(v, dict):
- v = extrapolate_statistics(v)
- elif isinstance(v, (list, tuple)):
- v = [extrapolate_statistics(record) for record in v]
- elif hasattr(v, '__call__'):
- v = v(scope)
- c[k] = v
- return c
-
-
-# --------------------- CherryPy Applications Statistics --------------------- #
-
-import threading
-import time
-
-import cherrypy
-
-appstats = logging.statistics.setdefault('CherryPy Applications', {})
-appstats.update({
- 'Enabled': True,
- 'Bytes Read/Request': lambda s: (s['Total Requests'] and
- (s['Total Bytes Read'] / float(s['Total Requests'])) or 0.0),
- 'Bytes Read/Second': lambda s: s['Total Bytes Read'] / s['Uptime'](s),
- 'Bytes Written/Request': lambda s: (s['Total Requests'] and
- (s['Total Bytes Written'] / float(s['Total Requests'])) or 0.0),
- 'Bytes Written/Second': lambda s: s['Total Bytes Written'] / s['Uptime'](s),
- 'Current Time': lambda s: time.time(),
- 'Current Requests': 0,
- 'Requests/Second': lambda s: float(s['Total Requests']) / s['Uptime'](s),
- 'Server Version': cherrypy.__version__,
- 'Start Time': time.time(),
- 'Total Bytes Read': 0,
- 'Total Bytes Written': 0,
- 'Total Requests': 0,
- 'Total Time': 0,
- 'Uptime': lambda s: time.time() - s['Start Time'],
- 'Requests': {},
- })
-
-proc_time = lambda s: time.time() - s['Start Time']
-
-
-class ByteCountWrapper(object):
- """Wraps a file-like object, counting the number of bytes read."""
-
- def __init__(self, rfile):
- self.rfile = rfile
- self.bytes_read = 0
-
- def read(self, size=-1):
- data = self.rfile.read(size)
- self.bytes_read += len(data)
- return data
-
- def readline(self, size=-1):
- data = self.rfile.readline(size)
- self.bytes_read += len(data)
- return data
-
- def readlines(self, sizehint=0):
- # Shamelessly stolen from StringIO
- total = 0
- lines = []
- line = self.readline()
- while line:
- lines.append(line)
- total += len(line)
- if 0 < sizehint <= total:
- break
- line = self.readline()
- return lines
-
- def close(self):
- self.rfile.close()
-
- def __iter__(self):
- return self
-
- def next(self):
- data = self.rfile.next()
- self.bytes_read += len(data)
- return data
-
-
-average_uriset_time = lambda s: s['Count'] and (s['Sum'] / s['Count']) or 0
-
-
-class StatsTool(cherrypy.Tool):
- """Record various information about the current request."""
-
- def __init__(self):
- cherrypy.Tool.__init__(self, 'on_end_request', self.record_stop)
-
- def _setup(self):
- """Hook this tool into cherrypy.request.
-
- The standard CherryPy request object will automatically call this
- method when the tool is "turned on" in config.
- """
- if appstats.get('Enabled', False):
- cherrypy.Tool._setup(self)
- self.record_start()
-
- def record_start(self):
- """Record the beginning of a request."""
- request = cherrypy.serving.request
- if not hasattr(request.rfile, 'bytes_read'):
- request.rfile = ByteCountWrapper(request.rfile)
- request.body.fp = request.rfile
-
- r = request.remote
-
- appstats['Current Requests'] += 1
- appstats['Total Requests'] += 1
- appstats['Requests'][threading._get_ident()] = {
- 'Bytes Read': None,
- 'Bytes Written': None,
- # Use a lambda so the ip gets updated by tools.proxy later
- 'Client': lambda s: '%s:%s' % (r.ip, r.port),
- 'End Time': None,
- 'Processing Time': proc_time,
- 'Request-Line': request.request_line,
- 'Response Status': None,
- 'Start Time': time.time(),
- }
-
- def record_stop(self, uriset=None, slow_queries=1.0, slow_queries_count=100,
- debug=False, **kwargs):
- """Record the end of a request."""
- resp = cherrypy.serving.response
- w = appstats['Requests'][threading._get_ident()]
-
- r = cherrypy.request.rfile.bytes_read
- w['Bytes Read'] = r
- appstats['Total Bytes Read'] += r
-
- if resp.stream:
- w['Bytes Written'] = 'chunked'
- else:
- cl = int(resp.headers.get('Content-Length', 0))
- w['Bytes Written'] = cl
- appstats['Total Bytes Written'] += cl
-
- w['Response Status'] = getattr(resp, 'output_status', None) or resp.status
-
- w['End Time'] = time.time()
- p = w['End Time'] - w['Start Time']
- w['Processing Time'] = p
- appstats['Total Time'] += p
-
- appstats['Current Requests'] -= 1
-
- if debug:
- cherrypy.log('Stats recorded: %s' % repr(w), 'TOOLS.CPSTATS')
-
- if uriset:
- rs = appstats.setdefault('URI Set Tracking', {})
- r = rs.setdefault(uriset, {
- 'Min': None, 'Max': None, 'Count': 0, 'Sum': 0,
- 'Avg': average_uriset_time})
- if r['Min'] is None or p < r['Min']:
- r['Min'] = p
- if r['Max'] is None or p > r['Max']:
- r['Max'] = p
- r['Count'] += 1
- r['Sum'] += p
-
- if slow_queries and p > slow_queries:
- sq = appstats.setdefault('Slow Queries', [])
- sq.append(w.copy())
- if len(sq) > slow_queries_count:
- sq.pop(0)
-
-
-import cherrypy
-cherrypy.tools.cpstats = StatsTool()
-
-
-# ---------------------- CherryPy Statistics Reporting ---------------------- #
-
-import os
-thisdir = os.path.abspath(os.path.dirname(__file__))
-
-try:
- import json
-except ImportError:
- try:
- import simplejson as json
- except ImportError:
- json = None
-
-
-missing = object()
-
-locale_date = lambda v: time.strftime('%c', time.gmtime(v))
-iso_format = lambda v: time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(v))
-
-def pause_resume(ns):
- def _pause_resume(enabled):
- pause_disabled = ''
- resume_disabled = ''
- if enabled:
- resume_disabled = 'disabled="disabled" '
- else:
- pause_disabled = 'disabled="disabled" '
- return """
-
-
- """ % (ns, pause_disabled, ns, resume_disabled)
- return _pause_resume
-
-
-class StatsPage(object):
-
- formatting = {
- 'CherryPy Applications': {
- 'Enabled': pause_resume('CherryPy Applications'),
- 'Bytes Read/Request': '%.3f',
- 'Bytes Read/Second': '%.3f',
- 'Bytes Written/Request': '%.3f',
- 'Bytes Written/Second': '%.3f',
- 'Current Time': iso_format,
- 'Requests/Second': '%.3f',
- 'Start Time': iso_format,
- 'Total Time': '%.3f',
- 'Uptime': '%.3f',
- 'Slow Queries': {
- 'End Time': None,
- 'Processing Time': '%.3f',
- 'Start Time': iso_format,
- },
- 'URI Set Tracking': {
- 'Avg': '%.3f',
- 'Max': '%.3f',
- 'Min': '%.3f',
- 'Sum': '%.3f',
- },
- 'Requests': {
- 'Bytes Read': '%s',
- 'Bytes Written': '%s',
- 'End Time': None,
- 'Processing Time': '%.3f',
- 'Start Time': None,
- },
- },
- 'CherryPy WSGIServer': {
- 'Enabled': pause_resume('CherryPy WSGIServer'),
- 'Connections/second': '%.3f',
- 'Start time': iso_format,
- },
- }
-
-
- def index(self):
- # Transform the raw data into pretty output for HTML
- yield """
-
-
- Statistics
-
-
-
-"""
- for title, scalars, collections in self.get_namespaces():
- yield """
-%s
-
-
-
-""" % title
- for i, (key, value) in enumerate(scalars):
- colnum = i % 3
- if colnum == 0: yield """
- """
- yield """
- %(key)s | %(value)s | """ % vars()
- if colnum == 2: yield """
- """
-
- if colnum == 0: yield """
- | |
- | |
- """
- elif colnum == 1: yield """
- | |
- """
- yield """
-
- """
-
- for subtitle, headers, subrows in collections:
- yield """
-%s
-
-
- """ % subtitle
- for key in headers:
- yield """
- %s | """ % key
- yield """
-
-
- """
- for subrow in subrows:
- yield """
- """
- for value in subrow:
- yield """
- %s | """ % value
- yield """
- """
- yield """
-
- """
- yield """
-
-
-"""
- index.exposed = True
-
- def get_namespaces(self):
- """Yield (title, scalars, collections) for each namespace."""
- s = extrapolate_statistics(logging.statistics)
- for title, ns in sorted(s.items()):
- scalars = []
- collections = []
- ns_fmt = self.formatting.get(title, {})
- for k, v in sorted(ns.items()):
- fmt = ns_fmt.get(k, {})
- if isinstance(v, dict):
- headers, subrows = self.get_dict_collection(v, fmt)
- collections.append((k, ['ID'] + headers, subrows))
- elif isinstance(v, (list, tuple)):
- headers, subrows = self.get_list_collection(v, fmt)
- collections.append((k, headers, subrows))
- else:
- format = ns_fmt.get(k, missing)
- if format is None:
- # Don't output this column.
- continue
- if hasattr(format, '__call__'):
- v = format(v)
- elif format is not missing:
- v = format % v
- scalars.append((k, v))
- yield title, scalars, collections
-
- def get_dict_collection(self, v, formatting):
- """Return ([headers], [rows]) for the given collection."""
- # E.g., the 'Requests' dict.
- headers = []
- for record in v.itervalues():
- for k3 in record:
- format = formatting.get(k3, missing)
- if format is None:
- # Don't output this column.
- continue
- if k3 not in headers:
- headers.append(k3)
- headers.sort()
-
- subrows = []
- for k2, record in sorted(v.items()):
- subrow = [k2]
- for k3 in headers:
- v3 = record.get(k3, '')
- format = formatting.get(k3, missing)
- if format is None:
- # Don't output this column.
- continue
- if hasattr(format, '__call__'):
- v3 = format(v3)
- elif format is not missing:
- v3 = format % v3
- subrow.append(v3)
- subrows.append(subrow)
-
- return headers, subrows
-
- def get_list_collection(self, v, formatting):
- """Return ([headers], [subrows]) for the given collection."""
- # E.g., the 'Slow Queries' list.
- headers = []
- for record in v:
- for k3 in record:
- format = formatting.get(k3, missing)
- if format is None:
- # Don't output this column.
- continue
- if k3 not in headers:
- headers.append(k3)
- headers.sort()
-
- subrows = []
- for record in v:
- subrow = []
- for k3 in headers:
- v3 = record.get(k3, '')
- format = formatting.get(k3, missing)
- if format is None:
- # Don't output this column.
- continue
- if hasattr(format, '__call__'):
- v3 = format(v3)
- elif format is not missing:
- v3 = format % v3
- subrow.append(v3)
- subrows.append(subrow)
-
- return headers, subrows
-
- if json is not None:
- def data(self):
- s = extrapolate_statistics(logging.statistics)
- cherrypy.response.headers['Content-Type'] = 'application/json'
- return json.dumps(s, sort_keys=True, indent=4)
- data.exposed = True
-
- def pause(self, namespace):
- logging.statistics.get(namespace, {})['Enabled'] = False
- raise cherrypy.HTTPRedirect('./')
- pause.exposed = True
- pause.cp_config = {'tools.allow.on': True,
- 'tools.allow.methods': ['POST']}
-
- def resume(self, namespace):
- logging.statistics.get(namespace, {})['Enabled'] = True
- raise cherrypy.HTTPRedirect('./')
- resume.exposed = True
- resume.cp_config = {'tools.allow.on': True,
- 'tools.allow.methods': ['POST']}
-
diff --git a/python-packages/cherrypy/lib/cptools.py b/python-packages/cherrypy/lib/cptools.py
deleted file mode 100644
index b426a3e784..0000000000
--- a/python-packages/cherrypy/lib/cptools.py
+++ /dev/null
@@ -1,617 +0,0 @@
-"""Functions for builtin CherryPy tools."""
-
-import logging
-import re
-
-import cherrypy
-from cherrypy._cpcompat import basestring, ntob, md5, set
-from cherrypy.lib import httputil as _httputil
-
-
-# Conditional HTTP request support #
-
-def validate_etags(autotags=False, debug=False):
- """Validate the current ETag against If-Match, If-None-Match headers.
-
- If autotags is True, an ETag response-header value will be provided
- from an MD5 hash of the response body (unless some other code has
- already provided an ETag header). If False (the default), the ETag
- will not be automatic.
-
- WARNING: the autotags feature is not designed for URLs which allow
- methods other than GET. For example, if a POST to the same URL returns
- no content, the automatic ETag will be incorrect, breaking a fundamental
- use for entity tags in a possibly destructive fashion. Likewise, if you
- raise 304 Not Modified, the response body will be empty, the ETag hash
- will be incorrect, and your application will break.
- See :rfc:`2616` Section 14.24.
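-
- To enable the tool (a sketch of the standard config pattern)::
-
- config = {'/': {'tools.etags.on': True,
- 'tools.etags.autotags': False}}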
- """
- response = cherrypy.serving.response
-
- # Guard against being run twice.
- if hasattr(response, "ETag"):
- return
-
- status, reason, msg = _httputil.valid_status(response.status)
-
- etag = response.headers.get('ETag')
-
- # Automatic ETag generation. See warning in docstring.
- if etag:
- if debug:
- cherrypy.log('ETag already set: %s' % etag, 'TOOLS.ETAGS')
- elif not autotags:
- if debug:
- cherrypy.log('Autotags off', 'TOOLS.ETAGS')
- elif status != 200:
- if debug:
- cherrypy.log('Status not 200', 'TOOLS.ETAGS')
- else:
- etag = response.collapse_body()
- etag = '"%s"' % md5(etag).hexdigest()
- if debug:
- cherrypy.log('Setting ETag: %s' % etag, 'TOOLS.ETAGS')
- response.headers['ETag'] = etag
-
- response.ETag = etag
-
- # "If the request would, without the If-Match header field, result in
- # anything other than a 2xx or 412 status, then the If-Match header
- # MUST be ignored."
- if debug:
- cherrypy.log('Status: %s' % status, 'TOOLS.ETAGS')
- if status >= 200 and status <= 299:
- request = cherrypy.serving.request
-
- conditions = request.headers.elements('If-Match') or []
- conditions = [str(x) for x in conditions]
- if debug:
- cherrypy.log('If-Match conditions: %s' % repr(conditions),
- 'TOOLS.ETAGS')
- if conditions and not (conditions == ["*"] or etag in conditions):
- raise cherrypy.HTTPError(412, "If-Match failed: ETag %r did "
- "not match %r" % (etag, conditions))
-
- conditions = request.headers.elements('If-None-Match') or []
- conditions = [str(x) for x in conditions]
- if debug:
- cherrypy.log('If-None-Match conditions: %s' % repr(conditions),
- 'TOOLS.ETAGS')
- if conditions == ["*"] or etag in conditions:
- if debug:
- cherrypy.log('request.method: %s' % request.method, 'TOOLS.ETAGS')
- if request.method in ("GET", "HEAD"):
- raise cherrypy.HTTPRedirect([], 304)
- else:
- raise cherrypy.HTTPError(412, "If-None-Match failed: ETag %r "
- "matched %r" % (etag, conditions))
-
-def validate_since():
- """Validate the current Last-Modified against If-Modified-Since headers.
-
- If no code has set the Last-Modified response header, then no validation
- will be performed.
- """
- response = cherrypy.serving.response
- lastmod = response.headers.get('Last-Modified')
- if lastmod:
- status, reason, msg = _httputil.valid_status(response.status)
-
- request = cherrypy.serving.request
-
- since = request.headers.get('If-Unmodified-Since')
- if since and since != lastmod:
- if (status >= 200 and status <= 299) or status == 412:
- raise cherrypy.HTTPError(412)
-
- since = request.headers.get('If-Modified-Since')
- if since and since == lastmod:
- if (status >= 200 and status <= 299) or status == 304:
- if request.method in ("GET", "HEAD"):
- raise cherrypy.HTTPRedirect([], 304)
- else:
- raise cherrypy.HTTPError(412)
-
-
-# Tool code #
-
-def allow(methods=None, debug=False):
- """Raise 405 if request.method not in methods (default ['GET', 'HEAD']).
-
- The given methods are case-insensitive, and may be in any order.
- If only one method is allowed, you may supply a single string;
- if more than one, supply a list of strings.
-
- Regardless of whether the current method is allowed or not, this
- also emits an 'Allow' response header, containing the given methods.
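-
- For example (a sketch of the standard config pattern)::
-
- config = {'/': {'tools.allow.on': True,
- 'tools.allow.methods': ['GET', 'HEAD', 'POST']}}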
- """
- if not isinstance(methods, (tuple, list)):
- methods = [methods]
- methods = [m.upper() for m in methods if m]
- if not methods:
- methods = ['GET', 'HEAD']
- elif 'GET' in methods and 'HEAD' not in methods:
- methods.append('HEAD')
-
- cherrypy.response.headers['Allow'] = ', '.join(methods)
- if cherrypy.request.method not in methods:
- if debug:
- cherrypy.log('request.method %r not in methods %r' %
- (cherrypy.request.method, methods), 'TOOLS.ALLOW')
- raise cherrypy.HTTPError(405)
- else:
- if debug:
- cherrypy.log('request.method %r in methods %r' %
- (cherrypy.request.method, methods), 'TOOLS.ALLOW')
-
-
-def proxy(base=None, local='X-Forwarded-Host', remote='X-Forwarded-For',
- scheme='X-Forwarded-Proto', debug=False):
- """Change the base URL (scheme://host[:port][/path]).
-
- For running a CP server behind Apache, lighttpd, or other HTTP server.
-
- For Apache and lighttpd, you should leave the 'local' argument at the
- default value of 'X-Forwarded-Host'. For Squid, you probably want to set
- tools.proxy.local = 'Origin'.
-
- If you want the new request.base to include path info (not just the host),
- you must explicitly set base to the full base path, and ALSO set 'local'
- to '', so that the X-Forwarded-Host request header (which never includes
- path info) does not override it. Regardless, the value for 'base' MUST
- NOT end in a slash.
-
- cherrypy.request.remote.ip (the IP address of the client) will be
- rewritten if the header specified by the 'remote' arg is valid.
- By default, 'remote' is set to 'X-Forwarded-For'. If you do not
- want to rewrite remote.ip, set the 'remote' arg to an empty string.
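-
- For example, behind Apache with mod_proxy (a sketch; the base value is
- a placeholder)::
-
- config = {'/': {'tools.proxy.on': True,
- 'tools.proxy.base': 'http://www.example.com'}}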
- """
-
- request = cherrypy.serving.request
-
- if scheme:
- s = request.headers.get(scheme, None)
- if debug:
- cherrypy.log('Testing scheme %r:%r' % (scheme, s), 'TOOLS.PROXY')
- if s == 'on' and 'ssl' in scheme.lower():
- # This handles e.g. webfaction's 'X-Forwarded-Ssl: on' header
- scheme = 'https'
- else:
- # This is for lighttpd/pound/Mongrel's 'X-Forwarded-Proto: https'
- scheme = s
- if not scheme:
- scheme = request.base[:request.base.find("://")]
-
- if local:
- lbase = request.headers.get(local, None)
- if debug:
- cherrypy.log('Testing local %r:%r' % (local, lbase), 'TOOLS.PROXY')
- if lbase is not None:
- base = lbase.split(',')[0]
- if not base:
- port = request.local.port
- if port == 80:
- base = '127.0.0.1'
- else:
- base = '127.0.0.1:%s' % port
-
- if base.find("://") == -1:
- # add http:// or https:// if needed
- base = scheme + "://" + base
-
- request.base = base
-
- if remote:
- xff = request.headers.get(remote)
- if debug:
- cherrypy.log('Testing remote %r:%r' % (remote, xff), 'TOOLS.PROXY')
- if xff:
- if remote == 'X-Forwarded-For':
- # See http://bob.pythonmac.org/archives/2005/09/23/apache-x-forwarded-for-caveat/
- xff = xff.split(',')[-1].strip()
- request.remote.ip = xff
-
-
-def ignore_headers(headers=('Range',), debug=False):
- """Delete request headers whose field names are included in 'headers'.
-
- This is a useful tool for working behind certain HTTP servers;
- for example, Apache duplicates the work that CP does for 'Range'
- headers, and will doubly-truncate the response.
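-
- For example, to let the front-end server handle Range requests
- (a sketch of the standard config pattern)::
-
- config = {'/': {'tools.ignore_headers.on': True,
- 'tools.ignore_headers.headers': ['Range']}}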
- """
- request = cherrypy.serving.request
- for name in headers:
- if name in request.headers:
- if debug:
- cherrypy.log('Ignoring request header %r' % name,
- 'TOOLS.IGNORE_HEADERS')
- del request.headers[name]
-
-
-def response_headers(headers=None, debug=False):
- """Set headers on the response."""
- if debug:
- cherrypy.log('Setting response headers: %s' % repr(headers),
- 'TOOLS.RESPONSE_HEADERS')
- for name, value in (headers or []):
- cherrypy.serving.response.headers[name] = value
-response_headers.failsafe = True
-
-
-def referer(pattern, accept=True, accept_missing=False, error=403,
- message='Forbidden Referer header.', debug=False):
- """Raise HTTPError if Referer header does/does not match the given pattern.
-
- pattern
- A regular expression pattern to test against the Referer.
-
- accept
- If True, the Referer must match the pattern; if False,
- the Referer must NOT match the pattern.
-
- accept_missing
- If True, permit requests with no Referer header.
-
- error
- The HTTP error code to return to the client on failure.
-
- message
- A string to include in the response body on failure.
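-
- For example, to accept only same-site referers (a sketch; the pattern
- is illustrative)::
-
- config = {'/': {'tools.referer.on': True,
- 'tools.referer.pattern': r'http://[^/]*example\.com'}}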
-
- """
- try:
- ref = cherrypy.serving.request.headers['Referer']
- match = bool(re.match(pattern, ref))
- if debug:
- cherrypy.log('Referer %r matches %r' % (ref, pattern),
- 'TOOLS.REFERER')
- if accept == match:
- return
- except KeyError:
- if debug:
- cherrypy.log('No Referer header', 'TOOLS.REFERER')
- if accept_missing:
- return
-
- raise cherrypy.HTTPError(error, message)
-
-
-class SessionAuth(object):
- """Assert that the user is logged in."""
-
- session_key = "username"
- debug = False
-
- def check_username_and_password(self, username, password):
- pass
-
- def anonymous(self):
- """Provide a temporary user name for anonymous users."""
- pass
-
- def on_login(self, username):
- pass
-
- def on_logout(self, username):
- pass
-
- def on_check(self, username):
- pass
-
- def login_screen(self, from_page='..', username='', error_msg='', **kwargs):
- return ntob("""
-Message: %(error_msg)s
-
-""" % {'from_page': from_page, 'username': username,
- 'error_msg': error_msg}, "utf-8")
-
- def do_login(self, username, password, from_page='..', **kwargs):
- """Login. May raise redirect, or return True if request handled."""
- response = cherrypy.serving.response
- error_msg = self.check_username_and_password(username, password)
- if error_msg:
- body = self.login_screen(from_page, username, error_msg)
- response.body = body
- if "Content-Length" in response.headers:
- # Delete Content-Length header so finalize() recalcs it.
- del response.headers["Content-Length"]
- return True
- else:
- cherrypy.serving.request.login = username
- cherrypy.session[self.session_key] = username
- self.on_login(username)
- raise cherrypy.HTTPRedirect(from_page or "/")
-
- def do_logout(self, from_page='..', **kwargs):
- """Logout. May raise redirect, or return True if request handled."""
- sess = cherrypy.session
- username = sess.get(self.session_key)
- sess[self.session_key] = None
- if username:
- cherrypy.serving.request.login = None
- self.on_logout(username)
- raise cherrypy.HTTPRedirect(from_page)
-
- def do_check(self):
- """Assert username. May raise redirect, or return True if request handled."""
- sess = cherrypy.session
- request = cherrypy.serving.request
- response = cherrypy.serving.response
-
- username = sess.get(self.session_key)
- if not username:
- sess[self.session_key] = username = self.anonymous()
- if self.debug:
- cherrypy.log('No session[username], trying anonymous', 'TOOLS.SESSAUTH')
- if not username:
- url = cherrypy.url(qs=request.query_string)
- if self.debug:
- cherrypy.log('No username, routing to login_screen with '
- 'from_page %r' % url, 'TOOLS.SESSAUTH')
- response.body = self.login_screen(url)
- if "Content-Length" in response.headers:
- # Delete Content-Length header so finalize() recalcs it.
- del response.headers["Content-Length"]
- return True
- if self.debug:
- cherrypy.log('Setting request.login to %r' % username, 'TOOLS.SESSAUTH')
- request.login = username
- self.on_check(username)
-
- def run(self):
- request = cherrypy.serving.request
- response = cherrypy.serving.response
-
- path = request.path_info
- if path.endswith('login_screen'):
- if self.debug:
- cherrypy.log('routing %r to login_screen' % path, 'TOOLS.SESSAUTH')
- return self.login_screen(**request.params)
- elif path.endswith('do_login'):
- if request.method != 'POST':
- response.headers['Allow'] = "POST"
- if self.debug:
- cherrypy.log('do_login requires POST', 'TOOLS.SESSAUTH')
- raise cherrypy.HTTPError(405)
- if self.debug:
- cherrypy.log('routing %r to do_login' % path, 'TOOLS.SESSAUTH')
- return self.do_login(**request.params)
- elif path.endswith('do_logout'):
- if request.method != 'POST':
- response.headers['Allow'] = "POST"
- raise cherrypy.HTTPError(405)
- if self.debug:
- cherrypy.log('routing %r to do_logout' % path, 'TOOLS.SESSAUTH')
- return self.do_logout(**request.params)
- else:
- if self.debug:
- cherrypy.log('No special path, running do_check', 'TOOLS.SESSAUTH')
- return self.do_check()
-
-
-def session_auth(**kwargs):
- sa = SessionAuth()
- for k, v in kwargs.items():
- setattr(sa, k, v)
- return sa.run()
-session_auth.__doc__ = """Session authentication hook.
-
-Any attribute of the SessionAuth class may be overridden via a keyword arg
-to this function:
-
-""" + "\n".join(["%s: %s" % (k, type(getattr(SessionAuth, k)).__name__)
- for k in dir(SessionAuth) if not k.startswith("__")])
-
-
-def log_traceback(severity=logging.ERROR, debug=False):
- """Write the last error's traceback to the cherrypy error log."""
- cherrypy.log("", "HTTP", severity=severity, traceback=True)
-
-def log_request_headers(debug=False):
- """Write request headers to the cherrypy error log."""
- h = [" %s: %s" % (k, v) for k, v in cherrypy.serving.request.header_list]
- cherrypy.log('\nRequest Headers:\n' + '\n'.join(h), "HTTP")
-
-def log_hooks(debug=False):
- """Write request.hooks to the cherrypy error log."""
- request = cherrypy.serving.request
-
- msg = []
- # Sort by the standard points if possible.
- from cherrypy import _cprequest
- points = _cprequest.hookpoints
- for k in request.hooks.keys():
- if k not in points:
- points.append(k)
-
- for k in points:
- msg.append(" %s:" % k)
- v = request.hooks.get(k, [])
- v.sort()
- for h in v:
- msg.append(" %r" % h)
- cherrypy.log('\nRequest Hooks for ' + cherrypy.url() +
- ':\n' + '\n'.join(msg), "HTTP")
-
-def redirect(url='', internal=True, debug=False):
- """Raise InternalRedirect or HTTPRedirect to the given url."""
- if debug:
- cherrypy.log('Redirecting %sto: %s' %
- ({True: 'internal ', False: ''}[internal], url),
- 'TOOLS.REDIRECT')
- if internal:
- raise cherrypy.InternalRedirect(url)
- else:
- raise cherrypy.HTTPRedirect(url)
-
-def trailing_slash(missing=True, extra=False, status=None, debug=False):
- """Redirect if path_info has (missing|extra) trailing slash."""
- request = cherrypy.serving.request
- pi = request.path_info
-
- if debug:
- cherrypy.log('is_index: %r, missing: %r, extra: %r, path_info: %r' %
- (request.is_index, missing, extra, pi),
- 'TOOLS.TRAILING_SLASH')
- if request.is_index is True:
- if missing:
- if not pi.endswith('/'):
- new_url = cherrypy.url(pi + '/', request.query_string)
- raise cherrypy.HTTPRedirect(new_url, status=status or 301)
- elif request.is_index is False:
- if extra:
- # If pi == '/', don't redirect to ''!
- if pi.endswith('/') and pi != '/':
- new_url = cherrypy.url(pi[:-1], request.query_string)
- raise cherrypy.HTTPRedirect(new_url, status=status or 301)
-
-def flatten(debug=False):
- """Wrap response.body in a generator that recursively iterates over body.
-
- This allows cherrypy.response.body to consist of 'nested generators';
- that is, a set of generators that yield generators.
- """
- import types
- def flattener(input):
- numchunks = 0
- for x in input:
- if not isinstance(x, types.GeneratorType):
- numchunks += 1
- yield x
- else:
- for y in flattener(x):
- numchunks += 1
- yield y
- if debug:
- cherrypy.log('Flattened %d chunks' % numchunks, 'TOOLS.FLATTEN')
- response = cherrypy.serving.response
- response.body = flattener(response.body)
-
-
-def accept(media=None, debug=False):
- """Return the client's preferred media-type (from the given Content-Types).
-
- If 'media' is None (the default), no test will be performed.
-
- If 'media' is provided, it should be the Content-Type value (as a string)
- or values (as a list or tuple of strings) which the current resource
- can emit. The client's acceptable media ranges (as declared in the
- Accept request header) will be matched in order to these Content-Type
- values; the first such string is returned. That is, the return value
- will always be one of the strings provided in the 'media' arg (or None
- if 'media' is None).
-
- If no match is found, then HTTPError 406 (Not Acceptable) is raised.
- Note that most web browsers send */* as a (low-quality) acceptable
- media range, which should match any Content-Type. In addition, "...if
- no Accept header field is present, then it is assumed that the client
- accepts all media types."
-
- Matching types are checked in order of client preference first,
- and then in the order of the given 'media' values.
-
- Note that this function does not honor accept-params (other than "q").
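-
- For example, inside a page handler (a sketch)::
-
- from cherrypy.lib import cptools
- best = cptools.accept(['text/html', 'application/json'])
- # 'best' is whichever of those two strings the client prefers;
- # HTTPError 406 is raised if it accepts neither.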
- """
- if not media:
- return
- if isinstance(media, basestring):
- media = [media]
- request = cherrypy.serving.request
-
- # Parse the Accept request header, and try to match one
- # of the requested media-ranges (in order of preference).
- ranges = request.headers.elements('Accept')
- if not ranges:
- # Any media type is acceptable.
- if debug:
- cherrypy.log('No Accept header elements', 'TOOLS.ACCEPT')
- return media[0]
- else:
- # Note that 'ranges' is sorted in order of preference
- for element in ranges:
- if element.qvalue > 0:
- if element.value == "*/*":
- # Matches any type or subtype
- if debug:
- cherrypy.log('Match due to */*', 'TOOLS.ACCEPT')
- return media[0]
- elif element.value.endswith("/*"):
- # Matches any subtype
- mtype = element.value[:-1] # Keep the slash
- for m in media:
- if m.startswith(mtype):
- if debug:
- cherrypy.log('Match due to %s' % element.value,
- 'TOOLS.ACCEPT')
- return m
- else:
- # Matches exact value
- if element.value in media:
- if debug:
- cherrypy.log('Match due to %s' % element.value,
- 'TOOLS.ACCEPT')
- return element.value
-
- # No suitable media-range found.
- ah = request.headers.get('Accept')
- if ah is None:
- msg = "Your client did not send an Accept header."
- else:
- msg = "Your client sent this Accept header: %s." % ah
- msg += (" But this resource only emits these media types: %s." %
- ", ".join(media))
- raise cherrypy.HTTPError(406, msg)
-
-
-class MonitoredHeaderMap(_httputil.HeaderMap):
-
- def __init__(self):
- self.accessed_headers = set()
-
- def __getitem__(self, key):
- self.accessed_headers.add(key)
- return _httputil.HeaderMap.__getitem__(self, key)
-
- def __contains__(self, key):
- self.accessed_headers.add(key)
- return _httputil.HeaderMap.__contains__(self, key)
-
- def get(self, key, default=None):
- self.accessed_headers.add(key)
- return _httputil.HeaderMap.get(self, key, default=default)
-
- if hasattr({}, 'has_key'):
- # Python 2
- def has_key(self, key):
- self.accessed_headers.add(key)
- return _httputil.HeaderMap.has_key(self, key)
-
-
-def autovary(ignore=None, debug=False):
- """Auto-populate the Vary response header based on request.header access."""
- request = cherrypy.serving.request
-
- req_h = request.headers
- request.headers = MonitoredHeaderMap()
- request.headers.update(req_h)
- if ignore is None:
- ignore = set(['Content-Disposition', 'Content-Length', 'Content-Type'])
-
- def set_response_header():
- resp_h = cherrypy.serving.response.headers
- v = set([e.value for e in resp_h.elements('Vary')])
- if debug:
- cherrypy.log('Accessed headers: %s' % request.headers.accessed_headers,
- 'TOOLS.AUTOVARY')
- v = v.union(request.headers.accessed_headers)
- v = v.difference(ignore)
- v = list(v)
- v.sort()
- resp_h['Vary'] = ', '.join(v)
- request.hooks.attach('before_finalize', set_response_header, 95)
-
diff --git a/python-packages/cherrypy/lib/encoding.py b/python-packages/cherrypy/lib/encoding.py
deleted file mode 100644
index 6459746509..0000000000
--- a/python-packages/cherrypy/lib/encoding.py
+++ /dev/null
@@ -1,388 +0,0 @@
-import struct
-import time
-
-import cherrypy
-from cherrypy._cpcompat import basestring, BytesIO, ntob, set, unicodestr
-from cherrypy.lib import file_generator
-from cherrypy.lib import set_vary_header
-
-
-def decode(encoding=None, default_encoding='utf-8'):
- """Replace or extend the list of charsets used to decode a request entity.
-
- Either argument may be a single string or a list of strings.
-
- encoding
- If not None, restricts the set of charsets attempted while decoding
- a request entity to the given set (even if a different charset is given in
- the Content-Type request header).
-
- default_encoding
- Only in effect if the 'encoding' argument is not given.
- If given, the set of charsets attempted while decoding a request entity is
- *extended* with the given value(s).
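-
- For example, to also try Latin-1 when decoding request bodies
- (a sketch)::
-
- config = {'/': {'tools.decode.on': True,
- 'tools.decode.default_encoding': ['utf-8', 'ISO-8859-1']}}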
-
- """
- body = cherrypy.request.body
- if encoding is not None:
- if not isinstance(encoding, list):
- encoding = [encoding]
- body.attempt_charsets = encoding
- elif default_encoding:
- if not isinstance(default_encoding, list):
- default_encoding = [default_encoding]
- body.attempt_charsets = body.attempt_charsets + default_encoding
-
-
-class ResponseEncoder:
-
- default_encoding = 'utf-8'
- failmsg = "Response body could not be encoded with %r."
- encoding = None
- errors = 'strict'
- text_only = True
- add_charset = True
- debug = False
-
- def __init__(self, **kwargs):
- for k, v in kwargs.items():
- setattr(self, k, v)
-
- self.attempted_charsets = set()
- request = cherrypy.serving.request
- if request.handler is not None:
- # Replace request.handler with self
- if self.debug:
- cherrypy.log('Replacing request.handler', 'TOOLS.ENCODE')
- self.oldhandler = request.handler
- request.handler = self
-
- def encode_stream(self, encoding):
- """Encode a streaming response body.
-
- Use a generator wrapper, and just pray it works as the stream is
- being written out.
- """
- if encoding in self.attempted_charsets:
- return False
- self.attempted_charsets.add(encoding)
-
- def encoder(body):
- for chunk in body:
- if isinstance(chunk, unicodestr):
- chunk = chunk.encode(encoding, self.errors)
- yield chunk
- self.body = encoder(self.body)
- return True
-
- def encode_string(self, encoding):
- """Encode a buffered response body."""
- if encoding in self.attempted_charsets:
- return False
- self.attempted_charsets.add(encoding)
-
- try:
- body = []
- for chunk in self.body:
- if isinstance(chunk, unicodestr):
- chunk = chunk.encode(encoding, self.errors)
- body.append(chunk)
- self.body = body
- except (LookupError, UnicodeError):
- return False
- else:
- return True
-
- def find_acceptable_charset(self):
- request = cherrypy.serving.request
- response = cherrypy.serving.response
-
- if self.debug:
- cherrypy.log('response.stream %r' % response.stream, 'TOOLS.ENCODE')
- if response.stream:
- encoder = self.encode_stream
- else:
- encoder = self.encode_string
- if "Content-Length" in response.headers:
- # Delete Content-Length header so finalize() recalcs it.
- # Encoded strings may be of different lengths from their
- # unicode equivalents, and even from each other. For example:
- # >>> t = u"\u7007\u3040"
- # >>> len(t)
- # 2
- # >>> len(t.encode("UTF-8"))
- # 6
- # >>> len(t.encode("utf7"))
- # 8
- del response.headers["Content-Length"]
-
- # Parse the Accept-Charset request header, and try to provide one
- # of the requested charsets (in order of user preference).
- encs = request.headers.elements('Accept-Charset')
- charsets = [enc.value.lower() for enc in encs]
- if self.debug:
- cherrypy.log('charsets %s' % repr(charsets), 'TOOLS.ENCODE')
-
- if self.encoding is not None:
- # If specified, force this encoding to be used, or fail.
- encoding = self.encoding.lower()
- if self.debug:
- cherrypy.log('Specified encoding %r' % encoding, 'TOOLS.ENCODE')
- if (not charsets) or "*" in charsets or encoding in charsets:
- if self.debug:
- cherrypy.log('Attempting encoding %r' % encoding, 'TOOLS.ENCODE')
- if encoder(encoding):
- return encoding
- else:
- if not encs:
- if self.debug:
- cherrypy.log('Attempting default encoding %r' %
- self.default_encoding, 'TOOLS.ENCODE')
- # Any character-set is acceptable.
- if encoder(self.default_encoding):
- return self.default_encoding
- else:
- raise cherrypy.HTTPError(500, self.failmsg % self.default_encoding)
- else:
- for element in encs:
- if element.qvalue > 0:
- if element.value == "*":
- # Matches any charset. Try our default.
- if self.debug:
- cherrypy.log('Attempting default encoding due '
- 'to %r' % element, 'TOOLS.ENCODE')
- if encoder(self.default_encoding):
- return self.default_encoding
- else:
- encoding = element.value
- if self.debug:
- cherrypy.log('Attempting encoding %s (qvalue >'
- '0)' % element, 'TOOLS.ENCODE')
- if encoder(encoding):
- return encoding
-
- if "*" not in charsets:
- # If no "*" is present in an Accept-Charset field, then all
- # character sets not explicitly mentioned get a quality
- # value of 0, except for ISO-8859-1, which gets a quality
- # value of 1 if not explicitly mentioned.
- iso = 'iso-8859-1'
- if iso not in charsets:
- if self.debug:
- cherrypy.log('Attempting ISO-8859-1 encoding',
- 'TOOLS.ENCODE')
- if encoder(iso):
- return iso
-
- # No suitable encoding found.
- ac = request.headers.get('Accept-Charset')
- if ac is None:
- msg = "Your client did not send an Accept-Charset header."
- else:
- msg = "Your client sent this Accept-Charset header: %s." % ac
- msg += " We tried these charsets: %s." % ", ".join(self.attempted_charsets)
- raise cherrypy.HTTPError(406, msg)
-
- def __call__(self, *args, **kwargs):
- response = cherrypy.serving.response
- self.body = self.oldhandler(*args, **kwargs)
-
- if isinstance(self.body, basestring):
- # strings get wrapped in a list because iterating over a single
- # item list is much faster than iterating over every character
- # in a long string.
- if self.body:
- self.body = [self.body]
- else:
- # [''] doesn't evaluate to False, so replace it with [].
- self.body = []
- elif hasattr(self.body, 'read'):
- self.body = file_generator(self.body)
- elif self.body is None:
- self.body = []
-
- ct = response.headers.elements("Content-Type")
- if self.debug:
- cherrypy.log('Content-Type: %r' % [str(h) for h in ct], 'TOOLS.ENCODE')
- if ct:
- ct = ct[0]
- if self.text_only:
- if ct.value.lower().startswith("text/"):
- if self.debug:
- cherrypy.log('Content-Type %s starts with "text/"' % ct,
- 'TOOLS.ENCODE')
- do_find = True
- else:
- if self.debug:
- cherrypy.log('Not finding because Content-Type %s does '
- 'not start with "text/"' % ct,
- 'TOOLS.ENCODE')
- do_find = False
- else:
- if self.debug:
- cherrypy.log('Finding because not text_only', 'TOOLS.ENCODE')
- do_find = True
-
- if do_find:
- # Set "charset=..." param on response Content-Type header
- ct.params['charset'] = self.find_acceptable_charset()
- if self.add_charset:
- if self.debug:
- cherrypy.log('Setting Content-Type %s' % ct,
- 'TOOLS.ENCODE')
- response.headers["Content-Type"] = str(ct)
-
- return self.body
-
-# GZIP
-
-def compress(body, compress_level):
- """Compress 'body' at the given compress_level."""
- import zlib
-
- # See http://www.gzip.org/zlib/rfc-gzip.html
- yield ntob('\x1f\x8b') # ID1 and ID2: gzip marker
- yield ntob('\x08') # CM: compression method
- yield ntob('\x00') # FLG: none set
- # MTIME: 4 bytes
- yield struct.pack(" 0 is present
- * The 'identity' value is given with a qvalue > 0.
-
- """
- request = cherrypy.serving.request
- response = cherrypy.serving.response
-
- set_vary_header(response, "Accept-Encoding")
-
- if not response.body:
- # Response body is empty (might be a 304 for instance)
- if debug:
- cherrypy.log('No response body', context='TOOLS.GZIP')
- return
-
- # If returning cached content (which should already have been gzipped),
- # don't re-zip.
- if getattr(request, "cached", False):
- if debug:
- cherrypy.log('Not gzipping cached response', context='TOOLS.GZIP')
- return
-
- acceptable = request.headers.elements('Accept-Encoding')
- if not acceptable:
- # If no Accept-Encoding field is present in a request,
- # the server MAY assume that the client will accept any
- # content coding. In this case, if "identity" is one of
- # the available content-codings, then the server SHOULD use
- # the "identity" content-coding, unless it has additional
- # information that a different content-coding is meaningful
- # to the client.
- if debug:
- cherrypy.log('No Accept-Encoding', context='TOOLS.GZIP')
- return
-
- ct = response.headers.get('Content-Type', '').split(';')[0]
- for coding in acceptable:
- if coding.value == 'identity' and coding.qvalue != 0:
- if debug:
- cherrypy.log('Non-zero identity qvalue: %s' % coding,
- context='TOOLS.GZIP')
- return
- if coding.value in ('gzip', 'x-gzip'):
- if coding.qvalue == 0:
- if debug:
- cherrypy.log('Zero gzip qvalue: %s' % coding,
- context='TOOLS.GZIP')
- return
-
- if ct not in mime_types:
- # If the list of provided mime-types contains tokens
- # such as 'text/*' or 'application/*+xml',
- # we go through them and find the most appropriate one
- # based on the given content-type.
- # The pattern matching is only caring about the most
- # common cases, as stated above, and doesn't support
- # for extra parameters.
- found = False
- if '/' in ct:
- ct_media_type, ct_sub_type = ct.split('/')
- for mime_type in mime_types:
- if '/' in mime_type:
- media_type, sub_type = mime_type.split('/')
- if ct_media_type == media_type:
- if sub_type == '*':
- found = True
- break
- elif '+' in sub_type and '+' in ct_sub_type:
- ct_left, ct_right = ct_sub_type.split('+')
- left, right = sub_type.split('+')
- if left == '*' and ct_right == right:
- found = True
- break
-
- if not found:
- if debug:
- cherrypy.log('Content-Type %s not in mime_types %r' %
- (ct, mime_types), context='TOOLS.GZIP')
- return
-
- if debug:
- cherrypy.log('Gzipping', context='TOOLS.GZIP')
- # Return a generator that compresses the page
- response.headers['Content-Encoding'] = 'gzip'
- response.body = compress(response.body, compress_level)
- if "Content-Length" in response.headers:
- # Delete Content-Length header so finalize() recalcs it.
- del response.headers["Content-Length"]
-
- return
-
- if debug:
- cherrypy.log('No acceptable encoding found.', context='GZIP')
- cherrypy.HTTPError(406, "identity, gzip").set_response()
-
diff --git a/python-packages/cherrypy/lib/gctools.py b/python-packages/cherrypy/lib/gctools.py
deleted file mode 100644
index 183148b212..0000000000
--- a/python-packages/cherrypy/lib/gctools.py
+++ /dev/null
@@ -1,214 +0,0 @@
-import gc
-import inspect
-import os
-import sys
-import time
-
-try:
- import objgraph
-except ImportError:
- objgraph = None
-
-import cherrypy
-from cherrypy import _cprequest, _cpwsgi
-from cherrypy.process.plugins import SimplePlugin
-
-
-class ReferrerTree(object):
- """An object which gathers all referrers of an object to a given depth."""
-
- peek_length = 40
-
- def __init__(self, ignore=None, maxdepth=2, maxparents=10):
- self.ignore = ignore or []
- self.ignore.append(inspect.currentframe().f_back)
- self.maxdepth = maxdepth
- self.maxparents = maxparents
-
- def ascend(self, obj, depth=1):
- """Return a nested list containing referrers of the given object."""
- depth += 1
- parents = []
-
- # Gather all referrers in one step to minimize
- # cascading references due to repr() logic.
- refs = gc.get_referrers(obj)
- self.ignore.append(refs)
- if len(refs) > self.maxparents:
- return [("[%s referrers]" % len(refs), [])]
-
- try:
- ascendcode = self.ascend.__code__
- except AttributeError:
- ascendcode = self.ascend.im_func.func_code
- for parent in refs:
- if inspect.isframe(parent) and parent.f_code is ascendcode:
- continue
- if parent in self.ignore:
- continue
- if depth <= self.maxdepth:
- parents.append((parent, self.ascend(parent, depth)))
- else:
- parents.append((parent, []))
-
- return parents
-
- def peek(self, s):
- """Return s, restricted to a sane length."""
- if len(s) > (self.peek_length + 3):
- half = self.peek_length // 2
- return s[:half] + '...' + s[-half:]
- else:
- return s
-
- def _format(self, obj, descend=True):
- """Return a string representation of a single object."""
- if inspect.isframe(obj):
- filename, lineno, func, context, index = inspect.getframeinfo(obj)
- return "" % func
-
- if not descend:
- return self.peek(repr(obj))
-
- if isinstance(obj, dict):
- return "{" + ", ".join(["%s: %s" % (self._format(k, descend=False),
- self._format(v, descend=False))
- for k, v in obj.items()]) + "}"
- elif isinstance(obj, list):
- return "[" + ", ".join([self._format(item, descend=False)
- for item in obj]) + "]"
- elif isinstance(obj, tuple):
- return "(" + ", ".join([self._format(item, descend=False)
- for item in obj]) + ")"
-
- r = self.peek(repr(obj))
- if isinstance(obj, (str, int, float)):
- return r
- return "%s: %s" % (type(obj), r)
-
- def format(self, tree):
- """Return a list of string reprs from a nested list of referrers."""
- output = []
- def ascend(branch, depth=1):
- for parent, grandparents in branch:
- output.append((" " * depth) + self._format(parent))
- if grandparents:
- ascend(grandparents, depth + 1)
- ascend(tree)
- return output
-
-
-def get_instances(cls):
- return [x for x in gc.get_objects() if isinstance(x, cls)]
-
-
-class RequestCounter(SimplePlugin):
-
- def start(self):
- self.count = 0
-
- def before_request(self):
- self.count += 1
-
- def after_request(self):
- self.count -= 1
-request_counter = RequestCounter(cherrypy.engine)
-request_counter.subscribe()
-
-
-def get_context(obj):
- if isinstance(obj, _cprequest.Request):
- return "path=%s;stage=%s" % (obj.path_info, obj.stage)
- elif isinstance(obj, _cprequest.Response):
- return "status=%s" % obj.status
- elif isinstance(obj, _cpwsgi.AppResponse):
- return "PATH_INFO=%s" % obj.environ.get('PATH_INFO', '')
- elif hasattr(obj, "tb_lineno"):
- return "tb_lineno=%s" % obj.tb_lineno
- return ""
-
-
-class GCRoot(object):
- """A CherryPy page handler for testing reference leaks."""
-
- classes = [(_cprequest.Request, 2, 2,
- "Should be 1 in this request thread and 1 in the main thread."),
- (_cprequest.Response, 2, 2,
- "Should be 1 in this request thread and 1 in the main thread."),
- (_cpwsgi.AppResponse, 1, 1,
- "Should be 1 in this request thread only."),
- ]
-
- def index(self):
- return "Hello, world!"
- index.exposed = True
-
- def stats(self):
- output = ["Statistics:"]
-
- for trial in range(10):
- if request_counter.count > 0:
- break
- time.sleep(0.5)
- else:
- output.append("\nNot all requests closed properly.")
-
- # gc_collect isn't perfectly synchronous, because it may
- # break reference cycles that then take time to fully
- # finalize. Call it thrice and hope for the best.
- gc.collect()
- gc.collect()
- unreachable = gc.collect()
- if unreachable:
- if objgraph is not None:
- final = objgraph.by_type('Nondestructible')
- if final:
- objgraph.show_backrefs(final, filename='finalizers.png')
-
- trash = {}
- for x in gc.garbage:
- trash[type(x)] = trash.get(type(x), 0) + 1
- if trash:
- output.insert(0, "\n%s unreachable objects:" % unreachable)
- trash = [(v, k) for k, v in trash.items()]
- trash.sort()
- for pair in trash:
- output.append(" " + repr(pair))
-
- # Check declared classes to verify uncollected instances.
- # These don't have to be part of a cycle; they can be
- # any objects that have unanticipated referrers that keep
- # them from being collected.
- allobjs = {}
- for cls, minobj, maxobj, msg in self.classes:
- allobjs[cls] = get_instances(cls)
-
- for cls, minobj, maxobj, msg in self.classes:
- objs = allobjs[cls]
- lenobj = len(objs)
- if lenobj < minobj or lenobj > maxobj:
- if minobj == maxobj:
- output.append(
- "\nExpected %s %r references, got %s." %
- (minobj, cls, lenobj))
- else:
- output.append(
- "\nExpected %s to %s %r references, got %s." %
- (minobj, maxobj, cls, lenobj))
-
- for obj in objs:
- if objgraph is not None:
- ig = [id(objs), id(inspect.currentframe())]
- fname = "graph_%s_%s.png" % (cls.__name__, id(obj))
- objgraph.show_backrefs(
- obj, extra_ignore=ig, max_depth=4, too_many=20,
- filename=fname, extra_info=get_context)
- output.append("\nReferrers for %s (refcount=%s):" %
- (repr(obj), sys.getrefcount(obj)))
- t = ReferrerTree(ignore=[objs], maxdepth=3)
- tree = t.ascend(obj)
- output.extend(t.format(tree))
-
- return "\n".join(output)
- stats.exposed = True
-
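A minimal sketch of driving ``ReferrerTree`` by hand, outside a request; ``Widget`` and ``holder`` are illustrative names::

    from cherrypy.lib.gctools import ReferrerTree

    class Widget(object):
        pass

    w = Widget()
    holder = [w]  # a referrer we expect to see in the report

    tree = ReferrerTree(maxdepth=2, maxparents=10)
    branches = tree.ascend(w)
    print("\n".join(tree.format(branches)))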
diff --git a/python-packages/cherrypy/lib/http.py b/python-packages/cherrypy/lib/http.py
deleted file mode 100644
index 4661d69e28..0000000000
--- a/python-packages/cherrypy/lib/http.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import warnings
-warnings.warn('cherrypy.lib.http has been deprecated and will be removed '
- 'in CherryPy 3.3. Use cherrypy.lib.httputil instead.',
- DeprecationWarning)
-
-from cherrypy.lib.httputil import *
-
diff --git a/python-packages/cherrypy/lib/httpauth.py b/python-packages/cherrypy/lib/httpauth.py
deleted file mode 100644
index ad7c6eba7c..0000000000
--- a/python-packages/cherrypy/lib/httpauth.py
+++ /dev/null
@@ -1,354 +0,0 @@
-"""
-This module defines functions to implement HTTP Digest Authentication (:rfc:`2617`).
-It is fully compliant with the 'Digest' and 'Basic' authentication methods. In
-'Digest' it supports both the MD5 and MD5-sess algorithms.
-
-Usage:
- First use 'doAuth' to request client authentication for a
- certain resource. You should send an httplib.UNAUTHORIZED response to the
- client so it knows it has to authenticate itself.
-
- Then use 'parseAuthorization' to retrieve the 'auth_map' used in
- 'checkResponse'.
-
- To use 'checkResponse' you must already have verified the password associated
- with the 'username' key in the 'auth_map' dict. Then use the 'checkResponse'
- function to verify whether the password matches the one sent by the client.
-
-SUPPORTED_ALGORITHM - list of supported 'Digest' algorithms
-SUPPORTED_QOP - list of supported 'Digest' 'qop'.
-"""
-__version__ = 1, 0, 1
-__author__ = "Tiago Cogumbreiro "
-__credits__ = """
- Peter van Kampen for his recipe, which implements most of Digest authentication:
- http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/302378
-"""
-
-__license__ = """
-Copyright (c) 2005, Tiago Cogumbreiro
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
- * Neither the name of Sylvain Hellegouarch nor the names of his contributors
- may be used to endorse or promote products derived from this software
- without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-"""
-
-__all__ = ("digestAuth", "basicAuth", "doAuth", "checkResponse",
- "parseAuthorization", "SUPPORTED_ALGORITHM", "md5SessionKey",
- "calculateNonce", "SUPPORTED_QOP")
-
-################################################################################
-import time
-from cherrypy._cpcompat import base64_decode, ntob, md5
-from cherrypy._cpcompat import parse_http_list, parse_keqv_list
-
-MD5 = "MD5"
-MD5_SESS = "MD5-sess"
-AUTH = "auth"
-AUTH_INT = "auth-int"
-
-SUPPORTED_ALGORITHM = (MD5, MD5_SESS)
-SUPPORTED_QOP = (AUTH, AUTH_INT)
-
-################################################################################
-# doAuth
-#
-DIGEST_AUTH_ENCODERS = {
- MD5: lambda val: md5(ntob(val)).hexdigest(),
- MD5_SESS: lambda val: md5(ntob(val)).hexdigest(),
-# SHA: lambda val: sha.new(ntob(val)).hexdigest (),
-}
-
-def calculateNonce (realm, algorithm = MD5):
- """This is an auxaliary function that calculates 'nonce' value. It is used
- to handle sessions."""
-
- global SUPPORTED_ALGORITHM, DIGEST_AUTH_ENCODERS
- assert algorithm in SUPPORTED_ALGORITHM
-
- try:
- encoder = DIGEST_AUTH_ENCODERS[algorithm]
- except KeyError:
- raise NotImplementedError ("The chosen algorithm (%s) does not have "\
- "an implementation yet" % algorithm)
-
- return encoder ("%d:%s" % (time.time(), realm))
-
-def digestAuth (realm, algorithm = MD5, nonce = None, qop = AUTH):
- """Challenges the client for a Digest authentication."""
- global SUPPORTED_ALGORITHM, DIGEST_AUTH_ENCODERS, SUPPORTED_QOP
- assert algorithm in SUPPORTED_ALGORITHM
- assert qop in SUPPORTED_QOP
-
- if nonce is None:
- nonce = calculateNonce (realm, algorithm)
-
- return 'Digest realm="%s", nonce="%s", algorithm="%s", qop="%s"' % (
- realm, nonce, algorithm, qop
- )
-
-def basicAuth (realm):
- """Challengenes the client for a Basic authentication."""
- assert '"' not in realm, "Realms cannot contain the \" (quote) character."
-
- return 'Basic realm="%s"' % realm
-
-def doAuth (realm):
- """'doAuth' function returns the challenge string b giving priority over
- Digest and fallback to Basic authentication when the browser doesn't
- support the first one.
-
- This should be set in the HTTP header under the key 'WWW-Authenticate'."""
-
- return digestAuth (realm) + " " + basicAuth (realm)
-
-
-################################################################################
-# Parse authorization parameters
-#
-def _parseDigestAuthorization (auth_params):
- # Convert the auth params to a dict
- items = parse_http_list(auth_params)
- params = parse_keqv_list(items)
-
- # Now validate the params
-
- # Check for required parameters
- required = ["username", "realm", "nonce", "uri", "response"]
- for k in required:
- if k not in params:
- return None
-
- # If qop is sent then cnonce and nc MUST be present
- if "qop" in params and not ("cnonce" in params \
- and "nc" in params):
- return None
-
- # If qop is not sent, neither cnonce nor nc can be present
- if ("cnonce" in params or "nc" in params) and \
- "qop" not in params:
- return None
-
- return params
-
-
-def _parseBasicAuthorization (auth_params):
- username, password = base64_decode(auth_params).split(":", 1)
- return {"username": username, "password": password}
-
-AUTH_SCHEMES = {
- "basic": _parseBasicAuthorization,
- "digest": _parseDigestAuthorization,
-}
-
-def parseAuthorization (credentials):
- """parseAuthorization will convert the value of the 'Authorization' key in
- the HTTP header to a map itself. If the parsing fails 'None' is returned.
- """
-
- global AUTH_SCHEMES
-
- auth_scheme, auth_params = credentials.split(" ", 1)
- auth_scheme = auth_scheme.lower ()
-
- parser = AUTH_SCHEMES[auth_scheme]
- params = parser (auth_params)
-
- if params is None:
- return
-
- assert "auth_scheme" not in params
- params["auth_scheme"] = auth_scheme
- return params
-
-
-################################################################################
-# Check provided response for a valid password
-#
-def md5SessionKey (params, password):
- """
- If the "algorithm" directive's value is "MD5-sess", then A1
- [the session key] is calculated only once - on the first request by the
- client following receipt of a WWW-Authenticate challenge from the server.
-
- This creates a 'session key' for the authentication of subsequent
- requests and responses which is different for each "authentication
- session", thus limiting the amount of material hashed with any one
- key.
-
- Because the server need only use the hash of the user
- credentials in order to create the A1 value, this construction could
- be used in conjunction with a third party authentication service so
- that the web server would not need the actual password value. The
- specification of such a protocol is beyond the scope of this
- specification.
-"""
-
- keys = ("username", "realm", "nonce", "cnonce")
- params_copy = {}
- for key in keys:
- params_copy[key] = params[key]
-
- params_copy["algorithm"] = MD5_SESS
- return _A1 (params_copy, password)
-
-def _A1(params, password):
- algorithm = params.get ("algorithm", MD5)
- H = DIGEST_AUTH_ENCODERS[algorithm]
-
- if algorithm == MD5:
- # If the "algorithm" directive's value is "MD5" or is
- # unspecified, then A1 is:
- # A1 = unq(username-value) ":" unq(realm-value) ":" passwd
- return "%s:%s:%s" % (params["username"], params["realm"], password)
-
- elif algorithm == MD5_SESS:
-
- # This is A1 if qop is set
- # A1 = H( unq(username-value) ":" unq(realm-value) ":" passwd )
- # ":" unq(nonce-value) ":" unq(cnonce-value)
- h_a1 = H ("%s:%s:%s" % (params["username"], params["realm"], password))
- return "%s:%s:%s" % (h_a1, params["nonce"], params["cnonce"])
-
-
-def _A2(params, method, kwargs):
- # If the "qop" directive's value is "auth" or is unspecified, then A2 is:
- # A2 = Method ":" digest-uri-value
-
- qop = params.get ("qop", "auth")
- if qop == "auth":
- return method + ":" + params["uri"]
- elif qop == "auth-int":
- # If the "qop" value is "auth-int", then A2 is:
- # A2 = Method ":" digest-uri-value ":" H(entity-body)
- entity_body = kwargs.get ("entity_body", "")
- H = kwargs["H"]
-
- return "%s:%s:%s" % (
- method,
- params["uri"],
- H(entity_body)
- )
-
- else:
- raise NotImplementedError ("The 'qop' method is unknown: %s" % qop)
-
-def _computeDigestResponse(auth_map, password, method = "GET", A1 = None, **kwargs):
- """
- Generates a response respecting the algorithm defined in RFC 2617
- """
- params = auth_map
-
- algorithm = params.get ("algorithm", MD5)
-
- H = DIGEST_AUTH_ENCODERS[algorithm]
- KD = lambda secret, data: H(secret + ":" + data)
-
- qop = params.get ("qop", None)
-
- H_A2 = H(_A2(params, method, kwargs))
-
- if algorithm == MD5_SESS and A1 is not None:
- H_A1 = H(A1)
- else:
- H_A1 = H(_A1(params, password))
-
- if qop in ("auth", "auth-int"):
- # If the "qop" value is "auth" or "auth-int":
- # request-digest = <"> < KD ( H(A1), unq(nonce-value)
- # ":" nc-value
- # ":" unq(cnonce-value)
- # ":" unq(qop-value)
- # ":" H(A2)
- # ) <">
- request = "%s:%s:%s:%s:%s" % (
- params["nonce"],
- params["nc"],
- params["cnonce"],
- params["qop"],
- H_A2,
- )
- elif qop is None:
- # If the "qop" directive is not present (this construction is
- # for compatibility with RFC 2069):
- # request-digest =
- # <"> < KD ( H(A1), unq(nonce-value) ":" H(A2) ) > <">
- request = "%s:%s" % (params["nonce"], H_A2)
-
- return KD(H_A1, request)
-
-def _checkDigestResponse(auth_map, password, method = "GET", A1 = None, **kwargs):
- """This function is used to verify the response given by the client when
- it tries to authenticate.
- Optional arguments:
- entity_body - when 'qop' is set to 'auth-int' you MUST provide the
- raw data you are going to send to the client (usually the
- HTML page).
- request_uri - the uri from the request line compared with the 'uri'
- directive of the authorization map. They must represent
- the same resource (unused at this time).
- """
-
- if auth_map['realm'] != kwargs.get('realm', None):
- return False
-
- response = _computeDigestResponse(auth_map, password, method, A1, **kwargs)
-
- return response == auth_map["response"]
-
-def _checkBasicResponse (auth_map, password, method='GET', encrypt=None, **kwargs):
- # Note that the Basic response doesn't provide the realm value so we cannot
- # test it
- try:
- return encrypt(auth_map["password"], auth_map["username"]) == password
- except TypeError:
- return encrypt(auth_map["password"]) == password
-
-AUTH_RESPONSES = {
- "basic": _checkBasicResponse,
- "digest": _checkDigestResponse,
-}
-
-def checkResponse (auth_map, password, method = "GET", encrypt=None, **kwargs):
- """'checkResponse' compares the auth_map with the password and optionally
- other arguments that each implementation might need.
-
- If the response is of type 'Basic' then the function has the following
- signature::
-
- checkBasicResponse (auth_map, password) -> bool
-
- If the response is of type 'Digest' then the function has the following
- signature::
-
- checkDigestResponse (auth_map, password, method = 'GET', A1 = None) -> bool
-
- The 'A1' argument is only used in MD5_SESS algorithm based responses.
- Check md5SessionKey() for more info.
- """
- checker = AUTH_RESPONSES[auth_map["auth_scheme"]]
- return checker (auth_map, password, method=method, encrypt=encrypt, **kwargs)
-
-
-
-
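A minimal end-to-end sketch of the Basic flow described in the module docstring above; the realm and credentials are illustrative, and the identity ``encrypt`` callable assumes plaintext password storage::

    import base64
    from cherrypy.lib import httpauth

    # Challenge to place in the WWW-Authenticate response header.
    challenge = httpauth.doAuth("example-realm")

    # Simulate the client's Authorization header for alice:secret.
    credentials = "Basic " + base64.b64encode(b"alice:secret").decode("ascii")
    auth_map = httpauth.parseAuthorization(credentials)

    # Plaintext storage, so 'encrypt' is the identity function.
    assert httpauth.checkResponse(auth_map, "secret",
                                  encrypt=lambda password: password)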
diff --git a/python-packages/cherrypy/lib/httputil.py b/python-packages/cherrypy/lib/httputil.py
deleted file mode 100644
index 5f77d54748..0000000000
--- a/python-packages/cherrypy/lib/httputil.py
+++ /dev/null
@@ -1,506 +0,0 @@
-"""HTTP library functions.
-
-This module contains functions for building an HTTP application
-framework: any one, not just one whose name starts with "Ch". ;) If you
-reference any modules from some popular framework inside *this* module,
-FuManChu will personally hang you up by your thumbs and submit you
-to a public caning.
-"""
-
-from binascii import b2a_base64
-from cherrypy._cpcompat import BaseHTTPRequestHandler, HTTPDate, ntob, ntou, reversed, sorted
-from cherrypy._cpcompat import basestring, bytestr, iteritems, nativestr, unicodestr, unquote_qs
-response_codes = BaseHTTPRequestHandler.responses.copy()
-
-# From http://www.cherrypy.org/ticket/361
-response_codes[500] = ('Internal Server Error',
- 'The server encountered an unexpected condition '
- 'which prevented it from fulfilling the request.')
-response_codes[503] = ('Service Unavailable',
- 'The server is currently unable to handle the '
- 'request due to a temporary overloading or '
- 'maintenance of the server.')
-
-import re
-import urllib
-
-
-
-def urljoin(*atoms):
- """Return the given path \*atoms, joined into a single URL.
-
- This will correctly join a SCRIPT_NAME and PATH_INFO into the
- original URL, even if either atom is blank.
- """
- url = "/".join([x for x in atoms if x])
- while "//" in url:
- url = url.replace("//", "/")
- # Special-case the final url of "", and return "/" instead.
- return url or "/"
-
-def urljoin_bytes(*atoms):
- """Return the given path *atoms, joined into a single URL.
-
- This will correctly join a SCRIPT_NAME and PATH_INFO into the
- original URL, even if either atom is blank.
- """
- url = ntob("/").join([x for x in atoms if x])
- while ntob("//") in url:
- url = url.replace(ntob("//"), ntob("/"))
- # Special-case the final url of "", and return "/" instead.
- return url or ntob("/")
-
-def protocol_from_http(protocol_str):
- """Return a protocol tuple from the given 'HTTP/x.y' string."""
- return int(protocol_str[5]), int(protocol_str[7])
-
-def get_ranges(headervalue, content_length):
- """Return a list of (start, stop) indices from a Range header, or None.
-
- Each (start, stop) tuple will be composed of two ints, which are suitable
- for use in a slicing operation. That is, the header "Range: bytes=3-6",
- if applied against a Python string, is requesting resource[3:7]. This
- function will return the list [(3, 7)].
-
- If this function returns an empty list, you should return HTTP 416.
- """
-
- if not headervalue:
- return None
-
- result = []
- bytesunit, byteranges = headervalue.split("=", 1)
- for brange in byteranges.split(","):
- start, stop = [x.strip() for x in brange.split("-", 1)]
- if start:
- if not stop:
- stop = content_length - 1
- start, stop = int(start), int(stop)
- if start >= content_length:
- # From rfc 2616 sec 14.16:
- # "If the server receives a request (other than one
- # including an If-Range request-header field) with an
- # unsatisfiable Range request-header field (that is,
- # all of whose byte-range-spec values have a first-byte-pos
- # value greater than the current length of the selected
- # resource), it SHOULD return a response code of 416
- # (Requested range not satisfiable)."
- continue
- if stop < start:
- # From rfc 2616 sec 14.16:
- # "If the server ignores a byte-range-spec because it
- # is syntactically invalid, the server SHOULD treat
- # the request as if the invalid Range header field
- # did not exist. (Normally, this means return a 200
- # response containing the full entity)."
- return None
- result.append((start, stop + 1))
- else:
- if not stop:
- # See rfc quote above.
- return None
- # Negative subscript (last N bytes)
- result.append((content_length - int(stop), content_length))
-
- return result
-
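A few concrete calls make the slicing semantics explicit; the expected values follow directly from the rules in the docstring above::

    from cherrypy.lib.httputil import get_ranges

    # "bytes=3-6" against a 10-byte resource selects resource[3:7].
    print(get_ranges("bytes=3-6", 10))   # [(3, 7)]

    # A suffix range asks for the last N bytes.
    print(get_ranges("bytes=-4", 10))    # [(6, 10)]

    # A range starting past the end is unsatisfiable: empty list -> 416.
    print(get_ranges("bytes=12-", 10))   # []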
-
-class HeaderElement(object):
- """An element (with parameters) from an HTTP header's element list."""
-
- def __init__(self, value, params=None):
- self.value = value
- if params is None:
- params = {}
- self.params = params
-
- def __cmp__(self, other):
- return cmp(self.value, other.value)
-
- def __lt__(self, other):
- return self.value < other.value
-
- def __str__(self):
- p = [";%s=%s" % (k, v) for k, v in iteritems(self.params)]
- return "%s%s" % (self.value, "".join(p))
-
- def __bytes__(self):
- return ntob(self.__str__())
-
- def __unicode__(self):
- return ntou(self.__str__())
-
- def parse(elementstr):
- """Transform 'token;key=val' to ('token', {'key': 'val'})."""
- # Split the element into a value and parameters. The 'value' may
- # be of the form, "token=token", but we don't split that here.
- atoms = [x.strip() for x in elementstr.split(";") if x.strip()]
- if not atoms:
- initial_value = ''
- else:
- initial_value = atoms.pop(0).strip()
- params = {}
- for atom in atoms:
- atom = [x.strip() for x in atom.split("=", 1) if x.strip()]
- key = atom.pop(0)
- if atom:
- val = atom[0]
- else:
- val = ""
- params[key] = val
- return initial_value, params
- parse = staticmethod(parse)
-
- def from_str(cls, elementstr):
- """Construct an instance from a string of the form 'token;key=val'."""
- ival, params = cls.parse(elementstr)
- return cls(ival, params)
- from_str = classmethod(from_str)
-
-
-q_separator = re.compile(r'; *q *=')
-
-class AcceptElement(HeaderElement):
- """An element (with parameters) from an Accept* header's element list.
-
- AcceptElement objects are comparable; the more-preferred object will be
- "less than" the less-preferred object. They are also therefore sortable;
- if you sort a list of AcceptElement objects, they will be listed in
- priority order; the most preferred value will be first. Yes, it should
- have been the other way around, but it's too late to fix now.
- """
-
- def from_str(cls, elementstr):
- qvalue = None
- # The first "q" parameter (if any) separates the initial
- # media-range parameter(s) (if any) from the accept-params.
- atoms = q_separator.split(elementstr, 1)
- media_range = atoms.pop(0).strip()
- if atoms:
- # The qvalue for an Accept header can have extensions. The other
- # headers cannot, but it's easier to parse them as if they did.
- qvalue = HeaderElement.from_str(atoms[0].strip())
-
- media_type, params = cls.parse(media_range)
- if qvalue is not None:
- params["q"] = qvalue
- return cls(media_type, params)
- from_str = classmethod(from_str)
-
- def qvalue(self):
- val = self.params.get("q", "1")
- if isinstance(val, HeaderElement):
- val = val.value
- return float(val)
- qvalue = property(qvalue, doc="The qvalue, or priority, of this value.")
-
- def __cmp__(self, other):
- diff = cmp(self.qvalue, other.qvalue)
- if diff == 0:
- diff = cmp(str(self), str(other))
- return diff
-
- def __lt__(self, other):
- if self.qvalue == other.qvalue:
- return str(self) < str(other)
- else:
- return self.qvalue < other.qvalue
-
-
-def header_elements(fieldname, fieldvalue):
- """Return a sorted HeaderElement list from a comma-separated header string."""
- if not fieldvalue:
- return []
-
- result = []
- for element in fieldvalue.split(","):
- if fieldname.startswith("Accept") or fieldname == 'TE':
- hv = AcceptElement.from_str(element)
- else:
- hv = HeaderElement.from_str(element)
- result.append(hv)
-
- return list(reversed(sorted(result)))
-
-def decode_TEXT(value):
- r"""Decode :rfc:`2047` TEXT (e.g. "=?utf-8?q?f=C3=BCr?=" -> "f\xfcr")."""
- try:
- # Python 3
- from email.header import decode_header
- except ImportError:
- from email.Header import decode_header
- atoms = decode_header(value)
- decodedvalue = ""
- for atom, charset in atoms:
- if charset is not None:
- atom = atom.decode(charset)
- decodedvalue += atom
- return decodedvalue
-
-def valid_status(status):
- """Return legal HTTP status Code, Reason-phrase and Message.
-
- The status arg must be an int, or a str that begins with an int.
-
- If status is an int, or a str and no reason-phrase is supplied,
- a default reason-phrase will be provided.
- """
-
- if not status:
- status = 200
-
- status = str(status)
- parts = status.split(" ", 1)
- if len(parts) == 1:
- # No reason supplied.
- code, = parts
- reason = None
- else:
- code, reason = parts
- reason = reason.strip()
-
- try:
- code = int(code)
- except ValueError:
- raise ValueError("Illegal response status from server "
- "(%s is non-numeric)." % repr(code))
-
- if code < 100 or code > 599:
- raise ValueError("Illegal response status from server "
- "(%s is out of range)." % repr(code))
-
- if code not in response_codes:
- # code is unknown but not illegal
- default_reason, message = "", ""
- else:
- default_reason, message = response_codes[code]
-
- if reason is None:
- reason = default_reason
-
- return code, reason, message
-
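Two illustrative calls; the exact reason/message text for known codes comes from Python's ``BaseHTTPRequestHandler`` (plus the overrides above) and may vary slightly across Python versions::

    from cherrypy.lib.httputil import valid_status

    print(valid_status(404))
    # e.g. (404, 'Not Found', 'Nothing matches the given URI')

    print(valid_status("299 Custom Reason"))
    # (299, 'Custom Reason', '') -- unknown but legal code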
-
-# NOTE: the parse_qs functions that follow are modified version of those
-# in the python3.0 source - we need to pass through an encoding to the unquote
-# method, but the default parse_qs function doesn't allow us to. These do.
-
-def _parse_qs(qs, keep_blank_values=0, strict_parsing=0, encoding='utf-8'):
- """Parse a query given as a string argument.
-
- Arguments:
-
- qs: URL-encoded query string to be parsed
-
- keep_blank_values: flag indicating whether blank values in
- URL encoded queries should be treated as blank strings. A
- true value indicates that blanks should be retained as blank
- strings. The default false value indicates that blank values
- are to be ignored and treated as if they were not included.
-
- strict_parsing: flag indicating what to do with parsing errors. If
- false (the default), errors are silently ignored. If true,
- errors raise a ValueError exception.
-
- Returns a dict, as G-d intended.
- """
- pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
- d = {}
- for name_value in pairs:
- if not name_value and not strict_parsing:
- continue
- nv = name_value.split('=', 1)
- if len(nv) != 2:
- if strict_parsing:
- raise ValueError("bad query field: %r" % (name_value,))
- # Handle case of a control-name with no equal sign
- if keep_blank_values:
- nv.append('')
- else:
- continue
- if len(nv[1]) or keep_blank_values:
- name = unquote_qs(nv[0], encoding)
- value = unquote_qs(nv[1], encoding)
- if name in d:
- if not isinstance(d[name], list):
- d[name] = [d[name]]
- d[name].append(value)
- else:
- d[name] = value
- return d
-
-
-image_map_pattern = re.compile(r"[0-9]+,[0-9]+")
-
-def parse_query_string(query_string, keep_blank_values=True, encoding='utf-8'):
- """Build a params dictionary from a query_string.
-
- Duplicate key/value pairs in the provided query_string will be
- returned as {'key': [val1, val2, ...]}. Single key/values will
- be returned as strings: {'key': 'value'}.
- """
- if image_map_pattern.match(query_string):
- # Server-side image map. Map the coords to 'x' and 'y'
- # (like CGI::Request does).
- pm = query_string.split(",")
- pm = {'x': int(pm[0]), 'y': int(pm[1])}
- else:
- pm = _parse_qs(query_string, keep_blank_values, encoding=encoding)
- return pm
-
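A quick sketch of both branches, the ordinary query string and the server-side image map::

    from cherrypy.lib.httputil import parse_query_string

    print(parse_query_string("a=1&b=2&b=3"))
    # {'a': '1', 'b': ['2', '3']}

    print(parse_query_string("14,27"))
    # {'x': 14, 'y': 27}  -- image-map coordinates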
-
-class CaseInsensitiveDict(dict):
- """A case-insensitive dict subclass.
-
- Each key is changed on entry to str(key).title().
- """
-
- def __getitem__(self, key):
- return dict.__getitem__(self, str(key).title())
-
- def __setitem__(self, key, value):
- dict.__setitem__(self, str(key).title(), value)
-
- def __delitem__(self, key):
- dict.__delitem__(self, str(key).title())
-
- def __contains__(self, key):
- return dict.__contains__(self, str(key).title())
-
- def get(self, key, default=None):
- return dict.get(self, str(key).title(), default)
-
- if hasattr({}, 'has_key'):
- def has_key(self, key):
- return dict.has_key(self, str(key).title())
-
- def update(self, E):
- for k in E.keys():
- self[str(k).title()] = E[k]
-
- def fromkeys(cls, seq, value=None):
- newdict = cls()
- for k in seq:
- newdict[str(k).title()] = value
- return newdict
- fromkeys = classmethod(fromkeys)
-
- def setdefault(self, key, x=None):
- key = str(key).title()
- try:
- return self[key]
- except KeyError:
- self[key] = x
- return x
-
- def pop(self, key, default):
- return dict.pop(self, str(key).title(), default)
-
-
-# TEXT = <any OCTET except CTLs, but including LWS>
-#
-# A CRLF is allowed in the definition of TEXT only as part of a header
-# field continuation. It is expected that the folding LWS will be
-# replaced with a single SP before interpretation of the TEXT value."
-if nativestr == bytestr:
- header_translate_table = ''.join([chr(i) for i in xrange(256)])
- header_translate_deletechars = ''.join([chr(i) for i in xrange(32)]) + chr(127)
-else:
- header_translate_table = None
- header_translate_deletechars = bytes(range(32)) + bytes([127])
-
-
-class HeaderMap(CaseInsensitiveDict):
- """A dict subclass for HTTP request and response headers.
-
- Each key is changed on entry to str(key).title(). This allows headers
- to be case-insensitive and avoid duplicates.
-
- Values are header values (decoded according to :rfc:`2047` if necessary).
- """
-
- protocol = (1, 1)
- encodings = ["ISO-8859-1"]
-
- # Someday, when http-bis is done, this will probably get dropped
- # since few servers, clients, or intermediaries do it. But until then,
- # we're going to obey the spec as is.
- # "Words of *TEXT MAY contain characters from character sets other than
- # ISO-8859-1 only when encoded according to the rules of RFC 2047."
- use_rfc_2047 = True
-
- def elements(self, key):
- """Return a sorted list of HeaderElements for the given header."""
- key = str(key).title()
- value = self.get(key)
- return header_elements(key, value)
-
- def values(self, key):
- """Return a sorted list of HeaderElement.value for the given header."""
- return [e.value for e in self.elements(key)]
-
- def output(self):
- """Transform self into a list of (name, value) tuples."""
- header_list = []
- for k, v in self.items():
- if isinstance(k, unicodestr):
- k = self.encode(k)
-
- if not isinstance(v, basestring):
- v = str(v)
-
- if isinstance(v, unicodestr):
- v = self.encode(v)
-
- # See header_translate_* constants above.
- # Replace only if you really know what you're doing.
- k = k.translate(header_translate_table, header_translate_deletechars)
- v = v.translate(header_translate_table, header_translate_deletechars)
-
- header_list.append((k, v))
- return header_list
-
- def encode(self, v):
- """Return the given header name or value, encoded for HTTP output."""
- for enc in self.encodings:
- try:
- return v.encode(enc)
- except UnicodeEncodeError:
- continue
-
- if self.protocol == (1, 1) and self.use_rfc_2047:
- # Encode RFC-2047 TEXT
- # (e.g. u"\u8200" -> "=?utf-8?b?6IiA?=").
- # We do our own here instead of using the email module
- # because we never want to fold lines--folding has
- # been deprecated by the HTTP working group.
- v = b2a_base64(v.encode('utf-8'))
- return (ntob('=?utf-8?b?') + v.strip(ntob('\n')) + ntob('?='))
-
- raise ValueError("Could not encode header part %r using "
- "any of the encodings %r." %
- (v, self.encodings))
-
-
-class Host(object):
- """An internet address.
-
- name
- Should be the client's host name. If not available (because no DNS
- lookup is performed), the IP address should be used instead.
-
- """
-
- ip = "0.0.0.0"
- port = 80
- name = "unknown.tld"
-
- def __init__(self, ip, port, name=None):
- self.ip = ip
- self.port = port
- if name is None:
- name = ip
- self.name = name
-
- def __repr__(self):
- return "httputil.Host(%r, %r, %r)" % (self.ip, self.port, self.name)
diff --git a/python-packages/cherrypy/lib/jsontools.py b/python-packages/cherrypy/lib/jsontools.py
deleted file mode 100644
index 209257914f..0000000000
--- a/python-packages/cherrypy/lib/jsontools.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import sys
-import cherrypy
-from cherrypy._cpcompat import basestring, ntou, json, json_encode, json_decode
-
-def json_processor(entity):
- """Read application/json data into request.json."""
- if not entity.headers.get(ntou("Content-Length"), ntou("")):
- raise cherrypy.HTTPError(411)
-
- body = entity.fp.read()
- try:
- cherrypy.serving.request.json = json_decode(body.decode('utf-8'))
- except ValueError:
- raise cherrypy.HTTPError(400, 'Invalid JSON document')
-
-def json_in(content_type=[ntou('application/json'), ntou('text/javascript')],
- force=True, debug=False, processor=json_processor):
- """Add a processor to parse JSON request entities:
- The default processor places the parsed data into request.json.
-
- Incoming request entities which match the given content_type(s) will
- be deserialized from JSON to the Python equivalent, and the result
- stored at cherrypy.request.json. The 'content_type' argument may
- be a Content-Type string or a list of allowable Content-Type strings.
-
- If the 'force' argument is True (the default), then entities of other
- content types will not be allowed; "415 Unsupported Media Type" is
- raised instead.
-
- Supply your own processor to use a custom decoder, or to handle the parsed
- data differently. The processor can be configured via
- tools.json_in.processor or via the decorator method.
-
- Note that the deserializer requires the client send a Content-Length
- request header, or it will raise "411 Length Required". If for any
- other reason the request entity cannot be deserialized from JSON,
- it will raise "400 Bad Request: Invalid JSON document".
-
- You must be using Python 2.6 or greater, or have the 'simplejson'
- package importable; otherwise, ValueError is raised during processing.
- """
- request = cherrypy.serving.request
- if isinstance(content_type, basestring):
- content_type = [content_type]
-
- if force:
- if debug:
- cherrypy.log('Removing body processors %s' %
- repr(request.body.processors.keys()), 'TOOLS.JSON_IN')
- request.body.processors.clear()
- request.body.default_proc = cherrypy.HTTPError(
- 415, 'Expected an entity of content type %s' %
- ', '.join(content_type))
-
- for ct in content_type:
- if debug:
- cherrypy.log('Adding body processor for %s' % ct, 'TOOLS.JSON_IN')
- request.body.processors[ct] = processor
-
-def json_handler(*args, **kwargs):
- value = cherrypy.serving.request._json_inner_handler(*args, **kwargs)
- return json_encode(value)
-
-def json_out(content_type='application/json', debug=False, handler=json_handler):
- """Wrap request.handler to serialize its output to JSON. Sets Content-Type.
-
- If the given content_type is None, the Content-Type response header
- is not set.
-
- Provide your own handler to use a custom encoder. For example
- cherrypy.config['tools.json_out.handler'] = <function>, or
- @json_out(handler=function).
-
- You must be using Python 2.6 or greater, or have the 'simplejson'
- package importable; otherwise, ValueError is raised during processing.
- """
- request = cherrypy.serving.request
- if debug:
- cherrypy.log('Replacing %s with JSON handler' % request.handler,
- 'TOOLS.JSON_OUT')
- request._json_inner_handler = request.handler
- request.handler = handler
- if content_type is not None:
- if debug:
- cherrypy.log('Setting Content-Type to %s' % content_type, 'TOOLS.JSON_OUT')
- cherrypy.serving.response.headers['Content-Type'] = content_type
-
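A minimal sketch wiring both tools together as decorators; the handler name and payload are illustrative::

    import cherrypy

    class Root(object):
        @cherrypy.expose
        @cherrypy.tools.json_in()
        @cherrypy.tools.json_out()
        def echo(self):
            # json_in placed the parsed request entity at request.json;
            # json_out serializes the return value and sets Content-Type.
            return {"you_sent": cherrypy.request.json}

    cherrypy.quickstart(Root())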
diff --git a/python-packages/cherrypy/lib/profiler.py b/python-packages/cherrypy/lib/profiler.py
deleted file mode 100644
index 785d58a302..0000000000
--- a/python-packages/cherrypy/lib/profiler.py
+++ /dev/null
@@ -1,208 +0,0 @@
-"""Profiler tools for CherryPy.
-
-CherryPy users
-==============
-
-You can profile any of your pages as follows::
-
- from cherrypy.lib import profiler
-
- class Root:
- p = profiler.Profiler("/path/to/profile/dir")
-
- def index(self):
- self.p.run(self._index)
- index.exposed = True
-
- def _index(self):
- return "Hello, world!"
-
- cherrypy.tree.mount(Root())
-
-You can also turn on profiling for all requests
-using the ``make_app`` function as WSGI middleware.
-
-CherryPy developers
-===================
-
-This module can be used whenever you make changes to CherryPy,
-to get a quick sanity-check on overall CP performance. Use the
-``--profile`` flag when running the test suite. Then, use the ``serve()``
-function to browse the results in a web browser. If you run this
-module from the command line, it will call ``serve()`` for you.
-
-"""
-
-
-def new_func_strip_path(func_name):
- """Make profiler output more readable by adding ``__init__`` modules' parents"""
- filename, line, name = func_name
- if filename.endswith("__init__.py"):
- return os.path.basename(filename[:-12]) + filename[-12:], line, name
- return os.path.basename(filename), line, name
-
-try:
- import profile
- import pstats
- pstats.func_strip_path = new_func_strip_path
-except ImportError:
- profile = None
- pstats = None
-
-import os, os.path
-import sys
-import warnings
-
-from cherrypy._cpcompat import BytesIO
-
-_count = 0
-
-class Profiler(object):
-
- def __init__(self, path=None):
- if not path:
- path = os.path.join(os.path.dirname(__file__), "profile")
- self.path = path
- if not os.path.exists(path):
- os.makedirs(path)
-
- def run(self, func, *args, **params):
- """Dump profile data into self.path."""
- global _count
- c = _count = _count + 1
- path = os.path.join(self.path, "cp_%04d.prof" % c)
- prof = profile.Profile()
- result = prof.runcall(func, *args, **params)
- prof.dump_stats(path)
- return result
-
- def statfiles(self):
- """:rtype: list of available profiles.
- """
- return [f for f in os.listdir(self.path)
- if f.startswith("cp_") and f.endswith(".prof")]
-
- def stats(self, filename, sortby='cumulative'):
- """:rtype stats(index): output of print_stats() for the given profile.
- """
- sio = BytesIO()
- if sys.version_info >= (2, 5):
- s = pstats.Stats(os.path.join(self.path, filename), stream=sio)
- s.strip_dirs()
- s.sort_stats(sortby)
- s.print_stats()
- else:
- # pstats.Stats before Python 2.5 didn't take a 'stream' arg,
- # but just printed to stdout. So re-route stdout.
- s = pstats.Stats(os.path.join(self.path, filename))
- s.strip_dirs()
- s.sort_stats(sortby)
- oldout = sys.stdout
- try:
- sys.stdout = sio
- s.print_stats()
- finally:
- sys.stdout = oldout
- response = sio.getvalue()
- sio.close()
- return response
-
- def index(self):
- return """
- CherryPy profile data
-
-
- """
- index.exposed = True
-
- def menu(self):
- yield "Profiling runs"
- yield "Click on one of the runs below to see profiling data. "
- runs = self.statfiles()
- runs.sort()
- for i in runs:
- yield "%s " % (i, i)
- menu.exposed = True
-
- def report(self, filename):
- import cherrypy
- cherrypy.response.headers['Content-Type'] = 'text/plain'
- return self.stats(filename)
- report.exposed = True
-
-
-class ProfileAggregator(Profiler):
-
- def __init__(self, path=None):
- Profiler.__init__(self, path)
- global _count
- self.count = _count = _count + 1
- self.profiler = profile.Profile()
-
- def run(self, func, *args):
- path = os.path.join(self.path, "cp_%04d.prof" % self.count)
- result = self.profiler.runcall(func, *args)
- self.profiler.dump_stats(path)
- return result
-
-
-class make_app:
- def __init__(self, nextapp, path=None, aggregate=False):
- """Make a WSGI middleware app which wraps 'nextapp' with profiling.
-
- nextapp
- the WSGI application to wrap, usually an instance of
- cherrypy.Application.
-
- path
- where to dump the profiling output.
-
- aggregate
- if True, profile data for all HTTP requests will go in
- a single file. If False (the default), each HTTP request will
- dump its profile data into a separate file.
-
- """
- if profile is None or pstats is None:
- msg = ("Your installation of Python does not have a profile module. "
- "If you're on Debian, try `sudo apt-get install python-profiler`. "
- "See http://www.cherrypy.org/wiki/ProfilingOnDebian for details.")
- warnings.warn(msg)
-
- self.nextapp = nextapp
- self.aggregate = aggregate
- if aggregate:
- self.profiler = ProfileAggregator(path)
- else:
- self.profiler = Profiler(path)
-
- def __call__(self, environ, start_response):
- def gather():
- result = []
- for line in self.nextapp(environ, start_response):
- result.append(line)
- return result
- return self.profiler.run(gather)
-
-
-def serve(path=None, port=8080):
- if profile is None or pstats is None:
- msg = ("Your installation of Python does not have a profile module. "
- "If you're on Debian, try `sudo apt-get install python-profiler`. "
- "See http://www.cherrypy.org/wiki/ProfilingOnDebian for details.")
- warnings.warn(msg)
-
- import cherrypy
- cherrypy.config.update({'server.socket_port': int(port),
- 'server.thread_pool': 10,
- 'environment': "production",
- })
- cherrypy.quickstart(Profiler(path))
-
-
-if __name__ == "__main__":
- serve(*tuple(sys.argv[1:]))
-
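A sketch of the middleware route mentioned in the module docstring; the mount point and dump directory are illustrative::

    import cherrypy
    from cherrypy.lib import profiler

    class Root(object):
        def index(self):
            return "Hello, world!"
        index.exposed = True

    app = cherrypy.Application(Root())
    # Each request dumps a cp_NNNN.prof file into the given directory.
    cherrypy.tree.graft(profiler.make_app(app, path="/tmp/profile_data"), "/")
    cherrypy.engine.start()
    cherrypy.engine.block()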
diff --git a/python-packages/cherrypy/lib/reprconf.py b/python-packages/cherrypy/lib/reprconf.py
deleted file mode 100644
index ba8ff51e41..0000000000
--- a/python-packages/cherrypy/lib/reprconf.py
+++ /dev/null
@@ -1,485 +0,0 @@
-"""Generic configuration system using unrepr.
-
-Configuration data may be supplied as a Python dictionary, as a filename,
-or as an open file object. When you supply a filename or file, Python's
-builtin ConfigParser is used (with some extensions).
-
-Namespaces
-----------
-
-Configuration keys are separated into namespaces by the first "." in the key.
-
-The only key that cannot exist in a namespace is the "environment" entry.
-This special entry 'imports' other config entries from a template stored in
-the Config.environments dict.
-
-You can define your own namespaces to be called when new config is merged
-by adding a named handler to Config.namespaces. The name can be any string,
-and the handler must be either a callable or a context manager.
-"""
-
-try:
- # Python 3.0+
- from configparser import ConfigParser
-except ImportError:
- from ConfigParser import ConfigParser
-
-try:
- set
-except NameError:
- from sets import Set as set
-
-try:
- basestring
-except NameError:
- basestring = str
-
-try:
- # Python 3
- import builtins
-except ImportError:
- # Python 2
- import __builtin__ as builtins
-
-import operator as _operator
-import sys
-
-def as_dict(config):
- """Return a dict from 'config' whether it is a dict, file, or filename."""
- if isinstance(config, basestring):
- config = Parser().dict_from_file(config)
- elif hasattr(config, 'read'):
- config = Parser().dict_from_file(config)
- return config
-
-
-class NamespaceSet(dict):
- """A dict of config namespace names and handlers.
-
- Each config entry should begin with a namespace name; the corresponding
- namespace handler will be called once for each config entry in that
- namespace, and will be passed two arguments: the config key (with the
- namespace removed) and the config value.
-
- Namespace handlers may be any Python callable; they may also be
- Python 2.5-style 'context managers', in which case their __enter__
- method should return a callable to be used as the handler.
- See cherrypy.tools (the Toolbox class) for an example.
- """
-
- def __call__(self, config):
- """Iterate through config and pass it to each namespace handler.
-
- config
- A flat dict, where keys use dots to separate
- namespaces, and values are arbitrary.
-
- The first name in each config key is used to look up the corresponding
- namespace handler. For example, a config entry of {'tools.gzip.on': v}
- will call the 'tools' namespace handler with the args: ('gzip.on', v)
- """
- # Separate the given config into namespaces
- ns_confs = {}
- for k in config:
- if "." in k:
- ns, name = k.split(".", 1)
- bucket = ns_confs.setdefault(ns, {})
- bucket[name] = config[k]
-
- # I chose __enter__ and __exit__ so someday this could be
- # rewritten using Python 2.5's 'with' statement:
- # for ns, handler in self.iteritems():
- # with handler as callable:
- # for k, v in ns_confs.get(ns, {}).iteritems():
- # callable(k, v)
- for ns, handler in self.items():
- exit = getattr(handler, "__exit__", None)
- if exit:
- callable = handler.__enter__()
- no_exc = True
- try:
- try:
- for k, v in ns_confs.get(ns, {}).items():
- callable(k, v)
- except:
- # The exceptional case is handled here
- no_exc = False
- if exit is None:
- raise
- if not exit(*sys.exc_info()):
- raise
- # The exception is swallowed if exit() returns true
- finally:
- # The normal and non-local-goto cases are handled here
- if no_exc and exit:
- exit(None, None, None)
- else:
- for k, v in ns_confs.get(ns, {}).items():
- handler(k, v)
-
- def __repr__(self):
- return "%s.%s(%s)" % (self.__module__, self.__class__.__name__,
- dict.__repr__(self))
-
- def __copy__(self):
- newobj = self.__class__()
- newobj.update(self)
- return newobj
- copy = __copy__
-
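A toy handler shows the dispatch contract: the first dotted atom picks the handler, which receives the remainder of the key and the value::

    from cherrypy.lib.reprconf import NamespaceSet

    seen = []
    ns = NamespaceSet()
    ns['tools'] = lambda k, v: seen.append((k, v))

    # 'tools.gzip.on' is routed to the 'tools' handler as ('gzip.on', True);
    # 'log.screen' has no registered handler and is ignored.
    ns({'tools.gzip.on': True, 'log.screen': False})
    print(seen)   # [('gzip.on', True)]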
-
-class Config(dict):
- """A dict-like set of configuration data, with defaults and namespaces.
-
- May take a file, filename, or dict.
- """
-
- defaults = {}
- environments = {}
- namespaces = NamespaceSet()
-
- def __init__(self, file=None, **kwargs):
- self.reset()
- if file is not None:
- self.update(file)
- if kwargs:
- self.update(kwargs)
-
- def reset(self):
- """Reset self to default values."""
- self.clear()
- dict.update(self, self.defaults)
-
- def update(self, config):
- """Update self from a dict, file or filename."""
- if isinstance(config, basestring):
- # Filename
- config = Parser().dict_from_file(config)
- elif hasattr(config, 'read'):
- # Open file object
- config = Parser().dict_from_file(config)
- else:
- config = config.copy()
- self._apply(config)
-
- def _apply(self, config):
- """Update self from a dict."""
- which_env = config.get('environment')
- if which_env:
- env = self.environments[which_env]
- for k in env:
- if k not in config:
- config[k] = env[k]
-
- dict.update(self, config)
- self.namespaces(config)
-
- def __setitem__(self, k, v):
- dict.__setitem__(self, k, v)
- self.namespaces({k: v})
-
-
-class Parser(ConfigParser):
- """Sub-class of ConfigParser that keeps the case of options and that
- raises an exception if the file cannot be read.
- """
-
- def optionxform(self, optionstr):
- return optionstr
-
- def read(self, filenames):
- if isinstance(filenames, basestring):
- filenames = [filenames]
- for filename in filenames:
- # try:
- # fp = open(filename)
- # except IOError:
- # continue
- fp = open(filename)
- try:
- self._read(fp, filename)
- finally:
- fp.close()
-
- def as_dict(self, raw=False, vars=None):
- """Convert an INI file to a dictionary"""
- # Load INI file into a dict
- result = {}
- for section in self.sections():
- if section not in result:
- result[section] = {}
- for option in self.options(section):
- value = self.get(section, option, raw=raw, vars=vars)
- try:
- value = unrepr(value)
- except Exception:
- x = sys.exc_info()[1]
- msg = ("Config error in section: %r, option: %r, "
- "value: %r. Config values must be valid Python." %
- (section, option, value))
- raise ValueError(msg, x.__class__.__name__, x.args)
- result[section][option] = value
- return result
-
- def dict_from_file(self, file):
- if hasattr(file, 'read'):
- self.readfp(file)
- else:
- self.read(file)
- return self.as_dict()
-
-
-# public domain "unrepr" implementation, found on the web and then improved.
-
-
-class _Builder2:
-
- def build(self, o):
- m = getattr(self, 'build_' + o.__class__.__name__, None)
- if m is None:
- raise TypeError("unrepr does not recognize %s" %
- repr(o.__class__.__name__))
- return m(o)
-
- def astnode(self, s):
- """Return a Python2 ast Node compiled from a string."""
- try:
- import compiler
- except ImportError:
- # Fallback to eval when compiler package is not available,
- # e.g. IronPython 1.0.
- return eval(s)
-
- p = compiler.parse("__tempvalue__ = " + s)
- return p.getChildren()[1].getChildren()[0].getChildren()[1]
-
- def build_Subscript(self, o):
- expr, flags, subs = o.getChildren()
- expr = self.build(expr)
- subs = self.build(subs)
- return expr[subs]
-
- def build_CallFunc(self, o):
- children = map(self.build, o.getChildren())
- callee = children.pop(0)
- kwargs = children.pop() or {}
- starargs = children.pop() or ()
- args = tuple(children) + tuple(starargs)
- return callee(*args, **kwargs)
-
- def build_List(self, o):
- return map(self.build, o.getChildren())
-
- def build_Const(self, o):
- return o.value
-
- def build_Dict(self, o):
- d = {}
- i = iter(map(self.build, o.getChildren()))
- for el in i:
- d[el] = i.next()
- return d
-
- def build_Tuple(self, o):
- return tuple(self.build_List(o))
-
- def build_Name(self, o):
- name = o.name
- if name == 'None':
- return None
- if name == 'True':
- return True
- if name == 'False':
- return False
-
- # See if the Name is a package or module. If it is, import it.
- try:
- return modules(name)
- except ImportError:
- pass
-
- # See if the Name is in builtins.
- try:
- return getattr(builtins, name)
- except AttributeError:
- pass
-
- raise TypeError("unrepr could not resolve the name %s" % repr(name))
-
- def build_Add(self, o):
- left, right = map(self.build, o.getChildren())
- return left + right
-
- def build_Mul(self, o):
- left, right = map(self.build, o.getChildren())
- return left * right
-
- def build_Getattr(self, o):
- parent = self.build(o.expr)
- return getattr(parent, o.attrname)
-
- def build_NoneType(self, o):
- return None
-
- def build_UnarySub(self, o):
- return -self.build(o.getChildren()[0])
-
- def build_UnaryAdd(self, o):
- return self.build(o.getChildren()[0])
-
-
-class _Builder3:
-
- def build(self, o):
- m = getattr(self, 'build_' + o.__class__.__name__, None)
- if m is None:
- raise TypeError("unrepr does not recognize %s" %
- repr(o.__class__.__name__))
- return m(o)
-
- def astnode(self, s):
- """Return a Python3 ast Node compiled from a string."""
- try:
- import ast
- except ImportError:
- # Fallback to eval when ast package is not available,
- # e.g. IronPython 1.0.
- return eval(s)
-
- p = ast.parse("__tempvalue__ = " + s)
- return p.body[0].value
-
- def build_Subscript(self, o):
- return self.build(o.value)[self.build(o.slice)]
-
- def build_Index(self, o):
- return self.build(o.value)
-
- def build_Call(self, o):
- callee = self.build(o.func)
-
- if o.args is None:
- args = ()
- else:
- args = tuple([self.build(a) for a in o.args])
-
- if o.starargs is None:
- starargs = ()
- else:
- starargs = self.build(o.starargs)
-
- if o.kwargs is None:
- kwargs = {}
- else:
- kwargs = self.build(o.kwargs)
-
- return callee(*(args + starargs), **kwargs)
-
- def build_List(self, o):
- return list(map(self.build, o.elts))
-
- def build_Str(self, o):
- return o.s
-
- def build_Num(self, o):
- return o.n
-
- def build_Dict(self, o):
- return dict([(self.build(k), self.build(v))
- for k, v in zip(o.keys, o.values)])
-
- def build_Tuple(self, o):
- return tuple(self.build_List(o))
-
- def build_Name(self, o):
- name = o.id
- if name == 'None':
- return None
- if name == 'True':
- return True
- if name == 'False':
- return False
-
- # See if the Name is a package or module. If it is, import it.
- try:
- return modules(name)
- except ImportError:
- pass
-
- # See if the Name is in builtins.
- try:
- import builtins
- return getattr(builtins, name)
- except AttributeError:
- pass
-
- raise TypeError("unrepr could not resolve the name %s" % repr(name))
-
- def build_UnaryOp(self, o):
- op, operand = map(self.build, [o.op, o.operand])
- return op(operand)
-
- def build_BinOp(self, o):
- left, op, right = map(self.build, [o.left, o.op, o.right])
- return op(left, right)
-
- def build_Add(self, o):
- return _operator.add
-
- def build_Mult(self, o):
- return _operator.mul
-
- def build_USub(self, o):
- return _operator.neg
-
- def build_Attribute(self, o):
- parent = self.build(o.value)
- return getattr(parent, o.attr)
-
- def build_NoneType(self, o):
- return None
-
-
-def unrepr(s):
- """Return a Python object compiled from a string."""
- if not s:
- return s
- if sys.version_info < (3, 0):
- b = _Builder2()
- else:
- b = _Builder3()
- obj = b.astnode(s)
- return b.build(obj)
-
-
-def modules(modulePath):
- """Load a module and retrieve a reference to that module."""
- try:
- mod = sys.modules[modulePath]
- if mod is None:
- raise KeyError()
- except KeyError:
- # The last [''] is important.
- mod = __import__(modulePath, globals(), locals(), [''])
- return mod
-
-def attributes(full_attribute_name):
- """Load a module and retrieve an attribute of that module."""
-
- # Parse out the path, module, and attribute
- last_dot = full_attribute_name.rfind(".")
- attr_name = full_attribute_name[last_dot + 1:]
- mod_path = full_attribute_name[:last_dot]
-
- mod = modules(mod_path)
- # Let an AttributeError propagate outward.
- try:
- attr = getattr(mod, attr_name)
- except AttributeError:
- raise AttributeError("'%s' object has no attribute '%s'"
- % (mod_path, attr_name))
-
- # Return a reference to the attribute.
- return attr
-
-
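A couple of illustrative calls; the commented file name is hypothetical::

    from cherrypy.lib.reprconf import Parser, unrepr

    # unrepr evaluates a restricted Python expression from a string.
    print(unrepr("[1, 2, 3]"))       # [1, 2, 3]
    print(unrepr("{'a': 10 * 2}"))   # {'a': 20}

    # Parser keeps option case and unreprs each value, so an INI line
    # like  tools.gzip.on = True  becomes a real boolean:
    # config = Parser().dict_from_file("site.conf")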
diff --git a/python-packages/cherrypy/lib/sessions.py b/python-packages/cherrypy/lib/sessions.py
deleted file mode 100644
index 9763f12001..0000000000
--- a/python-packages/cherrypy/lib/sessions.py
+++ /dev/null
@@ -1,871 +0,0 @@
-"""Session implementation for CherryPy.
-
-You need to edit your config file to use sessions. Here's an example::
-
- [/]
- tools.sessions.on = True
- tools.sessions.storage_type = "file"
- tools.sessions.storage_path = "/home/site/sessions"
- tools.sessions.timeout = 60
-
-This sets the session to be stored in files in the directory /home/site/sessions,
-and the session timeout to 60 minutes. If you omit ``storage_type`` the sessions
-will be saved in RAM. ``tools.sessions.on`` is the only required line for
-working sessions; the rest are optional.
-
-By default, the session ID is passed in a cookie, so the client's browser must
-have cookies enabled for your site.
-
-To set data for the current session, use
-``cherrypy.session['fieldname'] = 'fieldvalue'``;
-to get data use ``cherrypy.session.get('fieldname')``.
-
-================
-Locking sessions
-================
-
-By default, the ``'locking'`` mode of sessions is ``'implicit'``, which means
-the session is locked early and unlocked late. If you want to control when the
-session data is locked and unlocked, set ``tools.sessions.locking = 'explicit'``.
-Then call ``cherrypy.session.acquire_lock()`` and ``cherrypy.session.release_lock()``.
-Regardless of which mode you use, the session is guaranteed to be unlocked when
-the request is complete.
-
-=================
-Expiring Sessions
-=================
-
-You can force a session to expire with :func:`cherrypy.lib.sessions.expire`.
-Simply call that function at the point you want the session to expire, and it
-will cause the session cookie to expire client-side.
-
-===========================
-Session Fixation Protection
-===========================
-
-If CherryPy receives, via a request cookie, a session id that it does not
-recognize, it will reject that id and create a new one to return in the
-response cookie. This `helps prevent session fixation attacks
-<http://en.wikipedia.org/wiki/Session_fixation>`_.
-However, CherryPy "recognizes" a session id by looking up the saved session
-data for that id. Therefore, if you never save any session data,
-**you will get a new session id for every request**.
-
-================
-Sharing Sessions
-================
-
-If you run multiple instances of CherryPy (for example via mod_python behind
-Apache prefork), you most likely cannot use the RAM session backend, since each
-instance of CherryPy will have its own memory space. Use a different backend
-instead, and verify that all instances are pointing at the same file or db
-location. Alternately, you might try a load balancer which makes sessions
-"sticky". Google is your friend, there.
-
-================
-Expiration Dates
-================
-
-The response cookie will possess an expiration date to inform the client at
-which point to stop sending the cookie back in requests. If the server time
-and client time differ, expect sessions to be unreliable. **Make sure the
-system time of your server is accurate**.
-
-CherryPy defaults to a 60-minute session timeout, which also applies to the
-cookie which is sent to the client. Unfortunately, some versions of Safari
-("4 public beta" on Windows XP at least) appear to have a bug in their parsing
-of the GMT expiration date--they appear to interpret the date as one hour in
-the past. Sixty minutes minus one hour is pretty close to zero, so you may
-experience this bug as a new session id for every request, unless the requests
-are less than one second apart. To work around it, try increasing
-``tools.sessions.timeout``.
-
-On the other extreme, some users report Firefox sending cookies after their
-expiration date, although this was on a system with an inaccurate system time.
-Perhaps Firefox doesn't trust the system time.
-"""
-
-import datetime
-import os
-import random
-import time
-import threading
-import types
-from warnings import warn
-
-import cherrypy
-from cherrypy._cpcompat import copyitems, pickle, random20, unicodestr
-from cherrypy.lib import httputil
-
-
-missing = object()
-
-class Session(object):
- """A CherryPy dict-like Session object (one per request)."""
-
- _id = None
-
- id_observers = None
- "A list of callbacks to which to pass new id's."
-
- def _get_id(self):
- return self._id
- def _set_id(self, value):
- self._id = value
- for o in self.id_observers:
- o(value)
- id = property(_get_id, _set_id, doc="The current session ID.")
-
- timeout = 60
- "Number of minutes after which to delete session data."
-
- locked = False
- """
- If True, this session instance has exclusive read/write access
- to session data."""
-
- loaded = False
- """
- If True, data has been retrieved from storage. This should happen
- automatically on the first attempt to access session data."""
-
- clean_thread = None
- "Class-level Monitor which calls self.clean_up."
-
- clean_freq = 5
- "The poll rate for expired session cleanup in minutes."
-
- originalid = None
- "The session id passed by the client. May be missing or unsafe."
-
- missing = False
- "True if the session requested by the client did not exist."
-
- regenerated = False
- """
- True if the application called session.regenerate(). This is not set by
- internal calls to regenerate the session id."""
-
- debug = False
-
- def __init__(self, id=None, **kwargs):
- self.id_observers = []
- self._data = {}
-
- for k, v in kwargs.items():
- setattr(self, k, v)
-
- self.originalid = id
- self.missing = False
- if id is None:
- if self.debug:
- cherrypy.log('No id given; making a new one', 'TOOLS.SESSIONS')
- self._regenerate()
- else:
- self.id = id
- if not self._exists():
- if self.debug:
- cherrypy.log('Expired or malicious session %r; '
- 'making a new one' % id, 'TOOLS.SESSIONS')
- # Expired or malicious session. Make a new one.
- # See http://www.cherrypy.org/ticket/709.
- self.id = None
- self.missing = True
- self._regenerate()
-
- def now(self):
- """Generate the session specific concept of 'now'.
-
- Other session providers can override this to use alternative,
- possibly timezone aware, versions of 'now'.
- """
- return datetime.datetime.now()
-
- def regenerate(self):
- """Replace the current session (with a new id)."""
- self.regenerated = True
- self._regenerate()
-
- def _regenerate(self):
- if self.id is not None:
- self.delete()
-
- old_session_was_locked = self.locked
- if old_session_was_locked:
- self.release_lock()
-
- self.id = None
- while self.id is None:
- self.id = self.generate_id()
- # Assert that the generated id is not already stored.
- if self._exists():
- self.id = None
-
- if old_session_was_locked:
- self.acquire_lock()
-
- def clean_up(self):
- """Clean up expired sessions."""
- pass
-
- def generate_id(self):
- """Return a new session id."""
- return random20()
-
- def save(self):
- """Save session data."""
- try:
- # If session data has never been loaded then it's never been
- # accessed: no need to save it
- if self.loaded:
- t = datetime.timedelta(seconds=self.timeout * 60)
- expiration_time = self.now() + t
- if self.debug:
- cherrypy.log('Saving with expiry %s' % expiration_time,
- 'TOOLS.SESSIONS')
- self._save(expiration_time)
-
- finally:
- if self.locked:
- # Always release the lock if the user didn't release it
- self.release_lock()
-
- def load(self):
- """Copy stored session data into this session instance."""
- data = self._load()
- # data is either None or a tuple (session_data, expiration_time)
- if data is None or data[1] < self.now():
- if self.debug:
- cherrypy.log('Expired session, flushing data', 'TOOLS.SESSIONS')
- self._data = {}
- else:
- self._data = data[0]
- self.loaded = True
-
- # Stick the clean_thread in the class, not the instance.
- # The instances are created and destroyed per-request.
- cls = self.__class__
- if self.clean_freq and not cls.clean_thread:
- # clean_up is an instance method and not a classmethod,
- # so that tool config can be accessed inside the method.
- t = cherrypy.process.plugins.Monitor(
- cherrypy.engine, self.clean_up, self.clean_freq * 60,
- name='Session cleanup')
- t.subscribe()
- cls.clean_thread = t
- t.start()
-
- def delete(self):
- """Delete stored session data."""
- self._delete()
-
- def __getitem__(self, key):
- if not self.loaded: self.load()
- return self._data[key]
-
- def __setitem__(self, key, value):
- if not self.loaded: self.load()
- self._data[key] = value
-
- def __delitem__(self, key):
- if not self.loaded: self.load()
- del self._data[key]
-
- def pop(self, key, default=missing):
- """Remove the specified key and return the corresponding value.
- If key is not found, default is returned if given,
- otherwise KeyError is raised.
- """
- if not self.loaded: self.load()
- if default is missing:
- return self._data.pop(key)
- else:
- return self._data.pop(key, default)
-
- def __contains__(self, key):
- if not self.loaded: self.load()
- return key in self._data
-
- if hasattr({}, 'has_key'):
- def has_key(self, key):
- """D.has_key(k) -> True if D has a key k, else False."""
- if not self.loaded: self.load()
- return key in self._data
-
- def get(self, key, default=None):
- """D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None."""
- if not self.loaded: self.load()
- return self._data.get(key, default)
-
- def update(self, d):
- """D.update(E) -> None. Update D from E: for k in E: D[k] = E[k]."""
- if not self.loaded: self.load()
- self._data.update(d)
-
- def setdefault(self, key, default=None):
- """D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D."""
- if not self.loaded: self.load()
- return self._data.setdefault(key, default)
-
- def clear(self):
- """D.clear() -> None. Remove all items from D."""
- if not self.loaded: self.load()
- self._data.clear()
-
- def keys(self):
- """D.keys() -> list of D's keys."""
- if not self.loaded: self.load()
- return self._data.keys()
-
- def items(self):
- """D.items() -> list of D's (key, value) pairs, as 2-tuples."""
- if not self.loaded: self.load()
- return self._data.items()
-
- def values(self):
- """D.values() -> list of D's values."""
- if not self.loaded: self.load()
- return self._data.values()
-
-
-class RamSession(Session):
-
- # Class-level objects. Don't rebind these!
- cache = {}
- locks = {}
-
- def clean_up(self):
- """Clean up expired sessions."""
- now = self.now()
- for id, (data, expiration_time) in copyitems(self.cache):
- if expiration_time <= now:
- try:
- del self.cache[id]
- except KeyError:
- pass
- try:
- del self.locks[id]
- except KeyError:
- pass
-
- # Remove lock objects for any sessions which no longer exist.
- for id in list(self.locks):
- if id not in self.cache:
- self.locks.pop(id, None)
-
- def _exists(self):
- return self.id in self.cache
-
- def _load(self):
- return self.cache.get(self.id)
-
- def _save(self, expiration_time):
- self.cache[self.id] = (self._data, expiration_time)
-
- def _delete(self):
- self.cache.pop(self.id, None)
-
- def acquire_lock(self):
- """Acquire an exclusive lock on the currently-loaded session data."""
- self.locked = True
- self.locks.setdefault(self.id, threading.RLock()).acquire()
-
- def release_lock(self):
- """Release the lock on the currently-loaded session data."""
- self.locks[self.id].release()
- self.locked = False
-
- def __len__(self):
- """Return the number of active sessions."""
- return len(self.cache)
-
-
-class FileSession(Session):
- """Implementation of the File backend for sessions
-
- storage_path
- The folder where session data will be saved. Each session
- will be saved in its own file as a pickled (data, expiration_time)
- tuple; the filename will be self.SESSION_PREFIX + self.id.
-
- """
-
- SESSION_PREFIX = 'session-'
- LOCK_SUFFIX = '.lock'
- pickle_protocol = pickle.HIGHEST_PROTOCOL
-
- def __init__(self, id=None, **kwargs):
- # The 'storage_path' arg is required for file-based sessions.
- kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])
- Session.__init__(self, id=id, **kwargs)
-
- def setup(cls, **kwargs):
- """Set up the storage system for file-based sessions.
-
- This should only be called once per process; this will be done
- automatically when using sessions.init (as the built-in Tool does).
- """
- # The 'storage_path' arg is required for file-based sessions.
- kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])
-
- for k, v in kwargs.items():
- setattr(cls, k, v)
-
- # Warn if any lock files exist at startup.
- lockfiles = [fname for fname in os.listdir(cls.storage_path)
- if (fname.startswith(cls.SESSION_PREFIX)
- and fname.endswith(cls.LOCK_SUFFIX))]
- if lockfiles:
- plural = ('', 's')[len(lockfiles) > 1]
- warn("%s session lockfile%s found at startup. If you are "
- "only running one process, then you may need to "
- "manually delete the lockfiles found at %r."
- % (len(lockfiles), plural, cls.storage_path))
- setup = classmethod(setup)
-
- def _get_file_path(self):
- f = os.path.join(self.storage_path, self.SESSION_PREFIX + self.id)
- if not os.path.abspath(f).startswith(self.storage_path):
- raise cherrypy.HTTPError(400, "Invalid session id in cookie.")
- return f
-
- def _exists(self):
- path = self._get_file_path()
- return os.path.exists(path)
-
- def _load(self, path=None):
- if path is None:
- path = self._get_file_path()
- try:
- f = open(path, "rb")
- try:
- return pickle.load(f)
- finally:
- f.close()
- except (IOError, EOFError):
- return None
-
- def _save(self, expiration_time):
- f = open(self._get_file_path(), "wb")
- try:
- pickle.dump((self._data, expiration_time), f, self.pickle_protocol)
- finally:
- f.close()
-
- def _delete(self):
- try:
- os.unlink(self._get_file_path())
- except OSError:
- pass
-
- def acquire_lock(self, path=None):
- """Acquire an exclusive lock on the currently-loaded session data."""
- if path is None:
- path = self._get_file_path()
- path += self.LOCK_SUFFIX
- while True:
- try:
- lockfd = os.open(path, os.O_CREAT|os.O_WRONLY|os.O_EXCL)
- except OSError:
- time.sleep(0.1)
- else:
- os.close(lockfd)
- break
- self.locked = True
-
- def release_lock(self, path=None):
- """Release the lock on the currently-loaded session data."""
- if path is None:
- path = self._get_file_path()
- os.unlink(path + self.LOCK_SUFFIX)
- self.locked = False
-
- def clean_up(self):
- """Clean up expired sessions."""
- now = self.now()
- # Iterate over all session files in self.storage_path
- for fname in os.listdir(self.storage_path):
- if (fname.startswith(self.SESSION_PREFIX)
- and not fname.endswith(self.LOCK_SUFFIX)):
- # We have a session file: lock and load it and check
- # if it's expired. If it fails, nevermind.
- path = os.path.join(self.storage_path, fname)
- self.acquire_lock(path)
- try:
- contents = self._load(path)
- # _load returns None on IOError
- if contents is not None:
- data, expiration_time = contents
- if expiration_time < now:
- # Session expired: deleting it
- os.unlink(path)
- finally:
- self.release_lock(path)
-
- def __len__(self):
- """Return the number of active sessions."""
- return len([fname for fname in os.listdir(self.storage_path)
- if (fname.startswith(self.SESSION_PREFIX)
- and not fname.endswith(self.LOCK_SUFFIX))])
-
-
-class PostgresqlSession(Session):
- """ Implementation of the PostgreSQL backend for sessions. It assumes
- a table like this::
-
- create table session (
- id varchar(40),
- data text,
- expiration_time timestamp
- )
-
- You must provide your own get_db function.
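-
-    For example (a sketch assuming the psycopg2 driver; the connection
-    parameters are illustrative)::
-
-        import psycopg2
-
-        def get_db():
-            return psycopg2.connect('dbname=mysite user=webapp')
-
-        PostgresqlSession.get_db = staticmethod(get_db)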
- """
-
- pickle_protocol = pickle.HIGHEST_PROTOCOL
-
- def __init__(self, id=None, **kwargs):
- Session.__init__(self, id, **kwargs)
- self.cursor = self.db.cursor()
-
- def setup(cls, **kwargs):
- """Set up the storage system for Postgres-based sessions.
-
- This should only be called once per process; this will be done
- automatically when using sessions.init (as the built-in Tool does).
- """
- for k, v in kwargs.items():
- setattr(cls, k, v)
-
- cls.db = cls.get_db()
- setup = classmethod(setup)
-
- def __del__(self):
- if self.cursor:
- self.cursor.close()
- self.db.commit()
-
- def _exists(self):
- # Select session data from table
- self.cursor.execute('select data, expiration_time from session '
- 'where id=%s', (self.id,))
- rows = self.cursor.fetchall()
- return bool(rows)
-
- def _load(self):
- # Select session data from table
- self.cursor.execute('select data, expiration_time from session '
- 'where id=%s', (self.id,))
- rows = self.cursor.fetchall()
- if not rows:
- return None
-
- pickled_data, expiration_time = rows[0]
- data = pickle.loads(pickled_data)
- return data, expiration_time
-
- def _save(self, expiration_time):
- pickled_data = pickle.dumps(self._data, self.pickle_protocol)
- self.cursor.execute('update session set data = %s, '
- 'expiration_time = %s where id = %s',
- (pickled_data, expiration_time, self.id))
-
- def _delete(self):
- self.cursor.execute('delete from session where id=%s', (self.id,))
-
- def acquire_lock(self):
- """Acquire an exclusive lock on the currently-loaded session data."""
- # We use the "for update" clause to lock the row
- self.locked = True
- self.cursor.execute('select id from session where id=%s for update',
- (self.id,))
-
- def release_lock(self):
- """Release the lock on the currently-loaded session data."""
- # We just close the cursor and that will remove the lock
- # introduced by the "for update" clause
- self.cursor.close()
- self.locked = False
-
- def clean_up(self):
- """Clean up expired sessions."""
- self.cursor.execute('delete from session where expiration_time < %s',
- (self.now(),))
-
-
-class MemcachedSession(Session):
-
- # The most popular memcached client for Python isn't thread-safe.
- # Wrap all .get and .set operations in a single lock.
- mc_lock = threading.RLock()
-
- # This is a separate set of locks per session id.
- locks = {}
-
- servers = ['127.0.0.1:11211']
-
- def setup(cls, **kwargs):
- """Set up the storage system for memcached-based sessions.
-
- This should only be called once per process; this will be done
- automatically when using sessions.init (as the built-in Tool does).
- """
- for k, v in kwargs.items():
- setattr(cls, k, v)
-
- import memcache
- cls.cache = memcache.Client(cls.servers)
- setup = classmethod(setup)
-
- def _get_id(self):
- return self._id
- def _set_id(self, value):
- # This encode() call is where we differ from the superclass.
- # Memcache keys MUST be byte strings, not unicode.
- if isinstance(value, unicodestr):
- value = value.encode('utf-8')
-
- self._id = value
- for o in self.id_observers:
- o(value)
- id = property(_get_id, _set_id, doc="The current session ID.")
-
- def _exists(self):
- self.mc_lock.acquire()
- try:
- return bool(self.cache.get(self.id))
- finally:
- self.mc_lock.release()
-
- def _load(self):
- self.mc_lock.acquire()
- try:
- return self.cache.get(self.id)
- finally:
- self.mc_lock.release()
-
- def _save(self, expiration_time):
- # Send the expiration time as "Unix time" (seconds since 1/1/1970)
- td = int(time.mktime(expiration_time.timetuple()))
- self.mc_lock.acquire()
- try:
- if not self.cache.set(self.id, (self._data, expiration_time), td):
- raise AssertionError("Session data for id %r not set." % self.id)
- finally:
- self.mc_lock.release()
-
- def _delete(self):
- self.cache.delete(self.id)
-
- def acquire_lock(self):
- """Acquire an exclusive lock on the currently-loaded session data."""
- self.locked = True
- self.locks.setdefault(self.id, threading.RLock()).acquire()
-
- def release_lock(self):
- """Release the lock on the currently-loaded session data."""
- self.locks[self.id].release()
- self.locked = False
-
- def __len__(self):
- """Return the number of active sessions."""
- raise NotImplementedError
-
-
-# Hook functions (for CherryPy tools)
-
-def save():
- """Save any changed session data."""
-
- if not hasattr(cherrypy.serving, "session"):
- return
- request = cherrypy.serving.request
- response = cherrypy.serving.response
-
- # Guard against running twice
- if hasattr(request, "_sessionsaved"):
- return
- request._sessionsaved = True
-
- if response.stream:
- # If the body is being streamed, we have to save the data
- # *after* the response has been written out
- request.hooks.attach('on_end_request', cherrypy.session.save)
- else:
- # If the body is not being streamed, we save the data now
- # (so we can release the lock).
- if isinstance(response.body, types.GeneratorType):
- response.collapse_body()
- cherrypy.session.save()
-save.failsafe = True
-
-def close():
- """Close the session object for this request."""
- sess = getattr(cherrypy.serving, "session", None)
- if getattr(sess, "locked", False):
- # If the session is still locked we release the lock
- sess.release_lock()
-close.failsafe = True
-close.priority = 90
-
-
-def init(storage_type='ram', path=None, path_header=None, name='session_id',
- timeout=60, domain=None, secure=False, clean_freq=5,
- persistent=True, httponly=False, debug=False, **kwargs):
- """Initialize session object (using cookies).
-
- storage_type
- One of 'ram', 'file', 'postgresql', 'memcached'. This will be
- used to look up the corresponding class in cherrypy.lib.sessions
- globals. For example, 'file' will use the FileSession class.
-
- path
- The 'path' value to stick in the response cookie metadata.
-
- path_header
- If 'path' is None (the default), then the response
- cookie 'path' will be pulled from request.headers[path_header].
-
- name
- The name of the cookie.
-
- timeout
- The expiration timeout (in minutes) for the stored session data.
- If 'persistent' is True (the default), this is also the timeout
- for the cookie.
-
- domain
- The cookie domain.
-
- secure
- If False (the default) the cookie 'secure' value will not
- be set. If True, the cookie 'secure' value will be set (to 1).
-
- clean_freq (minutes)
- The poll rate for expired session cleanup.
-
- persistent
- If True (the default), the 'timeout' argument will be used
- to expire the cookie. If False, the cookie will not have an expiry,
- and the cookie will be a "session cookie" which expires when the
- browser is closed.
-
- httponly
- If False (the default) the cookie 'httponly' value will not be set.
- If True, the cookie 'httponly' value will be set (to 1).
-
- Any additional kwargs will be bound to the new Session instance,
- and may be specific to the storage type. See the subclass of Session
- you're using for more information.
- """
-
- request = cherrypy.serving.request
-
- # Guard against running twice
- if hasattr(request, "_session_init_flag"):
- return
- request._session_init_flag = True
-
- # Check if request came with a session ID
- id = None
- if name in request.cookie:
- id = request.cookie[name].value
- if debug:
- cherrypy.log('ID obtained from request.cookie: %r' % id,
- 'TOOLS.SESSIONS')
-
- # Find the storage class and call setup (first time only).
- storage_class = storage_type.title() + 'Session'
- storage_class = globals()[storage_class]
- if not hasattr(cherrypy, "session"):
- if hasattr(storage_class, "setup"):
- storage_class.setup(**kwargs)
-
- # Create and attach a new Session instance to cherrypy.serving.
- # It will possess a reference to (and lock, and lazily load)
- # the requested session data.
- kwargs['timeout'] = timeout
- kwargs['clean_freq'] = clean_freq
- cherrypy.serving.session = sess = storage_class(id, **kwargs)
- sess.debug = debug
- def update_cookie(id):
- """Update the cookie every time the session id changes."""
- cherrypy.serving.response.cookie[name] = id
- sess.id_observers.append(update_cookie)
-
- # Create cherrypy.session which will proxy to cherrypy.serving.session
- if not hasattr(cherrypy, "session"):
- cherrypy.session = cherrypy._ThreadLocalProxy('session')
-
- if persistent:
- cookie_timeout = timeout
- else:
- # See http://support.microsoft.com/kb/223799/EN-US/
- # and http://support.mozilla.com/en-US/kb/Cookies
- cookie_timeout = None
- set_response_cookie(path=path, path_header=path_header, name=name,
- timeout=cookie_timeout, domain=domain, secure=secure,
- httponly=httponly)
-
-
-def set_response_cookie(path=None, path_header=None, name='session_id',
- timeout=60, domain=None, secure=False, httponly=False):
- """Set a response cookie for the client.
-
- path
- the 'path' value to stick in the response cookie metadata.
-
- path_header
- if 'path' is None (the default), then the response
- cookie 'path' will be pulled from request.headers[path_header].
-
- name
- the name of the cookie.
-
- timeout
- the expiration timeout for the cookie. If 0 or other boolean
- False, no 'expires' param will be set, and the cookie will be a
- "session cookie" which expires when the browser is closed.
-
- domain
- the cookie domain.
-
- secure
- if False (the default) the cookie 'secure' value will not
- be set. If True, the cookie 'secure' value will be set (to 1).
-
- httponly
- If False (the default) the cookie 'httponly' value will not be set.
- If True, the cookie 'httponly' value will be set (to 1).
-
- """
- # Set response cookie
- cookie = cherrypy.serving.response.cookie
- cookie[name] = cherrypy.serving.session.id
- cookie[name]['path'] = (path or cherrypy.serving.request.headers.get(path_header)
- or '/')
-
- # We'd like to use the "max-age" param as indicated in
- # http://www.faqs.org/rfcs/rfc2109.html but IE doesn't
- # save it to disk and the session is lost if people close
- # the browser. So we have to use the old "expires" ... sigh ...
-## cookie[name]['max-age'] = timeout * 60
- if timeout:
- e = time.time() + (timeout * 60)
- cookie[name]['expires'] = httputil.HTTPDate(e)
- if domain is not None:
- cookie[name]['domain'] = domain
- if secure:
- cookie[name]['secure'] = 1
- if httponly:
- if not cookie[name].isReservedKey('httponly'):
- raise ValueError("The httponly cookie token is not supported.")
- cookie[name]['httponly'] = 1
-
-def expire():
- """Expire the current session cookie."""
- name = cherrypy.serving.request.config.get('tools.sessions.name', 'session_id')
- one_year = 60 * 60 * 24 * 365
- e = time.time() - one_year
- cherrypy.serving.response.cookie[name]['expires'] = httputil.HTTPDate(e)
-
-
diff --git a/python-packages/cherrypy/lib/static.py b/python-packages/cherrypy/lib/static.py
deleted file mode 100644
index 2d1423071b..0000000000
--- a/python-packages/cherrypy/lib/static.py
+++ /dev/null
@@ -1,363 +0,0 @@
-try:
- from io import UnsupportedOperation
-except ImportError:
- UnsupportedOperation = object()
-import logging
-import mimetypes
-mimetypes.init()
-mimetypes.types_map['.dwg']='image/x-dwg'
-mimetypes.types_map['.ico']='image/x-icon'
-mimetypes.types_map['.bz2']='application/x-bzip2'
-mimetypes.types_map['.gz']='application/x-gzip'
-
-import os
-import re
-import stat
-import time
-
-import cherrypy
-from cherrypy._cpcompat import ntob, unquote
-from cherrypy.lib import cptools, httputil, file_generator_limited
-
-
-def serve_file(path, content_type=None, disposition=None, name=None, debug=False):
- """Set status, headers, and body in order to serve the given path.
-
- The Content-Type header will be set to the content_type arg, if provided.
- If not provided, the Content-Type will be guessed by the file extension
- of the 'path' argument.
-
- If disposition is not None, the Content-Disposition header will be set
- to "; filename=". If name is None, it will be set
- to the basename of path. If disposition is None, no Content-Disposition
- header will be written.
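-
-    For example (a minimal sketch; the path is illustrative)::
-
-        class Root(object):
-            def report(self):
-                return serve_file('/home/site/files/report.pdf',
-                                  disposition='attachment')
-            report.exposed = True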
- """
-
- response = cherrypy.serving.response
-
- # If path is relative, users should fix it by making path absolute.
- # That is, CherryPy should not guess where the application root is.
- # It certainly should *not* use cwd (since CP may be invoked from a
- # variety of paths). If using tools.staticdir, you can make your relative
- # paths become absolute by supplying a value for "tools.staticdir.root".
- if not os.path.isabs(path):
- msg = "'%s' is not an absolute path." % path
- if debug:
- cherrypy.log(msg, 'TOOLS.STATICFILE')
- raise ValueError(msg)
-
- try:
- st = os.stat(path)
- except OSError:
- if debug:
- cherrypy.log('os.stat(%r) failed' % path, 'TOOLS.STATIC')
- raise cherrypy.NotFound()
-
- # Check if path is a directory.
- if stat.S_ISDIR(st.st_mode):
- # Let the caller deal with it as they like.
- if debug:
- cherrypy.log('%r is a directory' % path, 'TOOLS.STATIC')
- raise cherrypy.NotFound()
-
- # Set the Last-Modified response header, so that
- # modified-since validation code can work.
- response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
- cptools.validate_since()
-
- if content_type is None:
- # Set content-type based on filename extension
- ext = ""
- i = path.rfind('.')
- if i != -1:
- ext = path[i:].lower()
- content_type = mimetypes.types_map.get(ext, None)
- if content_type is not None:
- response.headers['Content-Type'] = content_type
- if debug:
- cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')
-
- cd = None
- if disposition is not None:
- if name is None:
- name = os.path.basename(path)
- cd = '%s; filename="%s"' % (disposition, name)
- response.headers["Content-Disposition"] = cd
- if debug:
- cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')
-
- # Set Content-Length and use an iterable (file object)
- # this way CP won't load the whole file in memory
- content_length = st.st_size
- fileobj = open(path, 'rb')
- return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
-
-def serve_fileobj(fileobj, content_type=None, disposition=None, name=None,
- debug=False):
- """Set status, headers, and body in order to serve the given file object.
-
- The Content-Type header will be set to the content_type arg, if provided.
-
- If disposition is not None, the Content-Disposition header will be set
- to "; filename=". If name is None, 'filename' will
- not be set. If disposition is None, no Content-Disposition header will
- be written.
-
- CAUTION: If the request contains a 'Range' header, one or more seek()s will
- be performed on the file object. This may cause undesired behavior if
- the file object is not seekable. It could also produce undesired results
- if the caller set the read position of the file object prior to calling
- serve_fileobj(), expecting that the data would be served starting from that
- position.
- """
-
- response = cherrypy.serving.response
-
- try:
- st = os.fstat(fileobj.fileno())
- except AttributeError:
- if debug:
- cherrypy.log('os has no fstat attribute', 'TOOLS.STATIC')
- content_length = None
- except UnsupportedOperation:
- content_length = None
- else:
- # Set the Last-Modified response header, so that
- # modified-since validation code can work.
- response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
- cptools.validate_since()
- content_length = st.st_size
-
- if content_type is not None:
- response.headers['Content-Type'] = content_type
- if debug:
- cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')
-
- cd = None
- if disposition is not None:
- if name is None:
- cd = disposition
- else:
- cd = '%s; filename="%s"' % (disposition, name)
- response.headers["Content-Disposition"] = cd
- if debug:
- cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')
-
- return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
-
-def _serve_fileobj(fileobj, content_type, content_length, debug=False):
- """Internal. Set response.body to the given file object, perhaps ranged."""
- response = cherrypy.serving.response
-
- # HTTP/1.0 didn't have Range/Accept-Ranges headers, or the 206 code
- request = cherrypy.serving.request
- if request.protocol >= (1, 1):
- response.headers["Accept-Ranges"] = "bytes"
- r = httputil.get_ranges(request.headers.get('Range'), content_length)
- if r == []:
- response.headers['Content-Range'] = "bytes */%s" % content_length
- message = "Invalid Range (first-byte-pos greater than Content-Length)"
- if debug:
- cherrypy.log(message, 'TOOLS.STATIC')
- raise cherrypy.HTTPError(416, message)
-
- if r:
- if len(r) == 1:
- # Return a single-part response.
- start, stop = r[0]
- if stop > content_length:
- stop = content_length
- r_len = stop - start
- if debug:
- cherrypy.log('Single part; start: %r, stop: %r' % (start, stop),
- 'TOOLS.STATIC')
- response.status = "206 Partial Content"
- response.headers['Content-Range'] = (
- "bytes %s-%s/%s" % (start, stop - 1, content_length))
- response.headers['Content-Length'] = r_len
- fileobj.seek(start)
- response.body = file_generator_limited(fileobj, r_len)
- else:
- # Return a multipart/byteranges response.
- response.status = "206 Partial Content"
- try:
- # Python 3
- from email.generator import _make_boundary as choose_boundary
- except ImportError:
- # Python 2
- from mimetools import choose_boundary
- boundary = choose_boundary()
- ct = "multipart/byteranges; boundary=%s" % boundary
- response.headers['Content-Type'] = ct
- if "Content-Length" in response.headers:
- # Delete Content-Length header so finalize() recalcs it.
- del response.headers["Content-Length"]
-
- def file_ranges():
- # Apache compatibility:
- yield ntob("\r\n")
-
- for start, stop in r:
- if debug:
- cherrypy.log('Multipart; start: %r, stop: %r' % (start, stop),
- 'TOOLS.STATIC')
- yield ntob("--" + boundary, 'ascii')
- yield ntob("\r\nContent-type: %s" % content_type, 'ascii')
- yield ntob("\r\nContent-range: bytes %s-%s/%s\r\n\r\n"
- % (start, stop - 1, content_length), 'ascii')
- fileobj.seek(start)
- for chunk in file_generator_limited(fileobj, stop-start):
- yield chunk
- yield ntob("\r\n")
- # Final boundary
- yield ntob("--" + boundary + "--", 'ascii')
-
- # Apache compatibility:
- yield ntob("\r\n")
- response.body = file_ranges()
- return response.body
- else:
- if debug:
- cherrypy.log('No byteranges requested', 'TOOLS.STATIC')
-
- # Set Content-Length and use an iterable (file object)
- # this way CP won't load the whole file in memory
- response.headers['Content-Length'] = content_length
- response.body = fileobj
- return response.body
-
-def serve_download(path, name=None):
- """Serve 'path' as an application/x-download attachment."""
- # This is such a common idiom I felt it deserved its own wrapper.
- return serve_file(path, "application/x-download", "attachment", name)
-
-
-def _attempt(filename, content_types, debug=False):
- if debug:
- cherrypy.log('Attempting %r (content_types %r)' %
- (filename, content_types), 'TOOLS.STATICDIR')
- try:
- # you can set the content types for a
- # complete directory per extension
- content_type = None
- if content_types:
- r, ext = os.path.splitext(filename)
- content_type = content_types.get(ext[1:], None)
- serve_file(filename, content_type=content_type, debug=debug)
- return True
- except cherrypy.NotFound:
- # If we didn't find the static file, continue handling the
- # request. We might find a dynamic handler instead.
- if debug:
- cherrypy.log('NotFound', 'TOOLS.STATICFILE')
- return False
-
-def staticdir(section, dir, root="", match="", content_types=None, index="",
- debug=False):
- """Serve a static resource from the given (root +) dir.
-
- match
- If given, request.path_info will be searched for the given
- regular expression before attempting to serve static content.
-
- content_types
- If given, it should be a Python dictionary of
- {file-extension: content-type} pairs, where 'file-extension' is
- a string (e.g. "gif") and 'content-type' is the value to write
- out in the Content-Type response header (e.g. "image/gif").
-
- index
- If provided, it should be the (relative) name of a file to
- serve for directory requests. For example, if the dir argument is
- '/home/me', the Request-URI is 'myapp', and the index arg is
- 'index.html', the file '/home/me/myapp/index.html' will be sought.
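-
-    A typical configuration (a sketch; the paths are illustrative)::
-
-        [/static]
-        tools.staticdir.on = True
-        tools.staticdir.root = "/home/site"
-        tools.staticdir.dir = "static"
-        tools.staticdir.index = "index.html"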
- """
- request = cherrypy.serving.request
- if request.method not in ('GET', 'HEAD'):
- if debug:
- cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICDIR')
- return False
-
- if match and not re.search(match, request.path_info):
- if debug:
- cherrypy.log('request.path_info %r does not match pattern %r' %
- (request.path_info, match), 'TOOLS.STATICDIR')
- return False
-
- # Allow the use of '~' to refer to a user's home directory.
- dir = os.path.expanduser(dir)
-
- # If dir is relative, make absolute using "root".
- if not os.path.isabs(dir):
- if not root:
- msg = "Static dir requires an absolute dir (or root)."
- if debug:
- cherrypy.log(msg, 'TOOLS.STATICDIR')
- raise ValueError(msg)
- dir = os.path.join(root, dir)
-
- # Determine where we are in the object tree relative to 'section'
- # (where the static tool was defined).
- if section == 'global':
- section = "/"
- section = section.rstrip(r"\/")
- branch = request.path_info[len(section) + 1:]
- branch = unquote(branch.lstrip(r"\/"))
-
- # If branch is "", filename will end in a slash
- filename = os.path.join(dir, branch)
- if debug:
- cherrypy.log('Checking file %r to fulfill %r' %
- (filename, request.path_info), 'TOOLS.STATICDIR')
-
- # There's a chance that the branch pulled from the URL might
- # have ".." or similar uplevel attacks in it. Check that the final
- # filename is a child of dir.
- if not os.path.normpath(filename).startswith(os.path.normpath(dir)):
- raise cherrypy.HTTPError(403) # Forbidden
-
- handled = _attempt(filename, content_types, debug=debug)
- if not handled:
- # Check for an index file if a folder was requested.
- if index:
- handled = _attempt(os.path.join(filename, index), content_types, debug=debug)
- if handled:
- request.is_index = filename[-1] in (r"\/")
- return handled
-
-def staticfile(filename, root=None, match="", content_types=None, debug=False):
- """Serve a static resource from the given (root +) filename.
-
- match
- If given, request.path_info will be searched for the given
- regular expression before attempting to serve static content.
-
- content_types
- If given, it should be a Python dictionary of
- {file-extension: content-type} pairs, where 'file-extension' is
- a string (e.g. "gif") and 'content-type' is the value to write
- out in the Content-Type response header (e.g. "image/gif").
-
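-    A typical configuration (a sketch; the path is illustrative)::
-
-        [/favicon.ico]
-        tools.staticfile.on = True
-        tools.staticfile.filename = "/home/site/static/favicon.ico"
-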
- """
- request = cherrypy.serving.request
- if request.method not in ('GET', 'HEAD'):
- if debug:
- cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICFILE')
- return False
-
- if match and not re.search(match, request.path_info):
- if debug:
- cherrypy.log('request.path_info %r does not match pattern %r' %
- (request.path_info, match), 'TOOLS.STATICFILE')
- return False
-
- # If filename is relative, make absolute using "root".
- if not os.path.isabs(filename):
- if not root:
- msg = "Static tool requires an absolute filename (got '%s')." % filename
- if debug:
- cherrypy.log(msg, 'TOOLS.STATICFILE')
- raise ValueError(msg)
- filename = os.path.join(root, filename)
-
- return _attempt(filename, content_types, debug=debug)
diff --git a/python-packages/cherrypy/lib/xmlrpcutil.py b/python-packages/cherrypy/lib/xmlrpcutil.py
deleted file mode 100644
index 9a44464bc0..0000000000
--- a/python-packages/cherrypy/lib/xmlrpcutil.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import sys
-
-import cherrypy
-from cherrypy._cpcompat import ntob
-
-def get_xmlrpclib():
- try:
- import xmlrpc.client as x
- except ImportError:
- import xmlrpclib as x
- return x
-
-def process_body():
- """Return (params, method) from request body."""
- try:
- return get_xmlrpclib().loads(cherrypy.request.body.read())
- except Exception:
- return ('ERROR PARAMS', ), 'ERRORMETHOD'
-
-
-def patched_path(path):
- """Return 'path', doctored for RPC."""
- if not path.endswith('/'):
- path += '/'
- if path.startswith('/RPC2/'):
- # Strip the leading '/RPC2'.
- path = path[5:]
- return path
-
-
-def _set_response(body):
- # The XML-RPC spec (http://www.xmlrpc.com/spec) says:
- # "Unless there's a lower-level error, always return 200 OK."
- # Since Python's xmlrpclib interprets a non-200 response
- # as a "Protocol Error", we'll just return 200 every time.
- response = cherrypy.response
- response.status = '200 OK'
- response.body = ntob(body, 'utf-8')
- response.headers['Content-Type'] = 'text/xml'
- response.headers['Content-Length'] = len(body)
-
-
-def respond(body, encoding='utf-8', allow_none=0):
- xmlrpclib = get_xmlrpclib()
- if not isinstance(body, xmlrpclib.Fault):
- body = (body,)
- _set_response(xmlrpclib.dumps(body, methodresponse=1,
- encoding=encoding,
- allow_none=allow_none))
-
-def on_error(*args, **kwargs):
- body = str(sys.exc_info()[1])
- xmlrpclib = get_xmlrpclib()
- _set_response(xmlrpclib.dumps(xmlrpclib.Fault(1, body)))
-
diff --git a/python-packages/cherrypy/process/__init__.py b/python-packages/cherrypy/process/__init__.py
deleted file mode 100644
index f15b12370a..0000000000
--- a/python-packages/cherrypy/process/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""Site container for an HTTP server.
-
-A Web Site Process Bus object is used to connect applications, servers,
-and frameworks with site-wide services such as daemonization, process
-reload, signal handling, privilege dropping, PID file management, logging
-for all of these, and more.
-
-The 'plugins' module defines a few abstract and concrete services for
-use with the bus. Some use tool-specific channels; see the documentation
-for each class.
-"""
-
-from cherrypy.process.wspbus import bus
-from cherrypy.process import plugins, servers
diff --git a/python-packages/cherrypy/process/plugins.py b/python-packages/cherrypy/process/plugins.py
deleted file mode 100644
index ba618a0bd0..0000000000
--- a/python-packages/cherrypy/process/plugins.py
+++ /dev/null
@@ -1,683 +0,0 @@
-"""Site services for use with a Web Site Process Bus."""
-
-import os
-import re
-import signal as _signal
-import sys
-import time
-import threading
-
-from cherrypy._cpcompat import basestring, get_daemon, get_thread_ident, ntob, set
-
-# _module__file__base is used by Autoreload to make
-# absolute any filenames retrieved from sys.modules which are not
-# already absolute paths. This is to work around Python's quirk
-# of importing the startup script and using a relative filename
-# for it in sys.modules.
-#
-# Autoreload examines sys.modules afresh every time it runs. If an application
-# changes the current directory by executing os.chdir(), then the next time
-# Autoreload runs, it will not be able to find any filenames which are
-# not absolute paths, because the current directory is not the same as when the
-# module was first imported. Autoreload will then wrongly conclude the file has
-# "changed", and initiate the shutdown/re-exec sequence.
-# See ticket #917.
-# For this workaround to have a decent probability of success, this module
-# needs to be imported as early as possible, before the app has much chance
-# to change the working directory.
-_module__file__base = os.getcwd()
-
-
-class SimplePlugin(object):
- """Plugin base class which auto-subscribes methods for known channels."""
-
- bus = None
- """A :class:`Bus `, usually cherrypy.engine."""
-
- def __init__(self, bus):
- self.bus = bus
-
- def subscribe(self):
- """Register this object as a (multi-channel) listener on the bus."""
- for channel in self.bus.listeners:
- # Subscribe self.start, self.exit, etc. if present.
- method = getattr(self, channel, None)
- if method is not None:
- self.bus.subscribe(channel, method)
-
- def unsubscribe(self):
- """Unregister this object as a listener on the bus."""
- for channel in self.bus.listeners:
- # Unsubscribe self.start, self.exit, etc. if present.
- method = getattr(self, channel, None)
- if method is not None:
- self.bus.unsubscribe(channel, method)
-
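-# A minimal sketch of a SimplePlugin subclass (illustrative, not part of the
-# original module): any method named after a bus channel ('start', 'stop',
-# 'exit', ...) is subscribed automatically by subscribe().
-#
-#     class ConnectionPlugin(SimplePlugin):
-#         def start(self):
-#             self.bus.log('Opening connections')
-#         def stop(self):
-#             self.bus.log('Closing connections')
-#
-#     ConnectionPlugin(cherrypy.engine).subscribe()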
-
-
-class SignalHandler(object):
- """Register bus channels (and listeners) for system signals.
-
- You can modify what signals your application listens for, and what it does
- when it receives signals, by modifying :attr:`SignalHandler.handlers`,
- a dict of {signal name: callback} pairs. The default set is::
-
- handlers = {'SIGTERM': self.bus.exit,
- 'SIGHUP': self.handle_SIGHUP,
- 'SIGUSR1': self.bus.graceful,
- }
-
- The :func:`SignalHandler.handle_SIGHUP` method calls :func:`bus.restart()`
- if the process is daemonized, but :func:`bus.exit()` if the process is
- attached to a TTY. This is because Unix window managers tend to send
- SIGHUP to terminal windows when the user closes them.
-
- Feel free to add signals which are not available on every platform. The
- :class:`SignalHandler` will ignore errors raised from attempting to register
- handlers for unknown signals.
- """
-
- handlers = {}
- """A map from signal names (e.g. 'SIGTERM') to handlers (e.g. bus.exit)."""
-
- signals = {}
- """A map from signal numbers to names."""
-
- for k, v in vars(_signal).items():
- if k.startswith('SIG') and not k.startswith('SIG_'):
- signals[v] = k
- del k, v
-
- def __init__(self, bus):
- self.bus = bus
- # Set default handlers
- self.handlers = {'SIGTERM': self.bus.exit,
- 'SIGHUP': self.handle_SIGHUP,
- 'SIGUSR1': self.bus.graceful,
- }
-
- if sys.platform[:4] == 'java':
- del self.handlers['SIGUSR1']
- self.handlers['SIGUSR2'] = self.bus.graceful
- self.bus.log("SIGUSR1 cannot be set on the JVM platform. "
- "Using SIGUSR2 instead.")
- self.handlers['SIGINT'] = self._jython_SIGINT_handler
-
- self._previous_handlers = {}
-
- def _jython_SIGINT_handler(self, signum=None, frame=None):
- # See http://bugs.jython.org/issue1313
- self.bus.log('Keyboard Interrupt: shutting down bus')
- self.bus.exit()
-
- def subscribe(self):
- """Subscribe self.handlers to signals."""
- for sig, func in self.handlers.items():
- try:
- self.set_handler(sig, func)
- except ValueError:
- pass
-
- def unsubscribe(self):
- """Unsubscribe self.handlers from signals."""
- for signum, handler in self._previous_handlers.items():
- signame = self.signals[signum]
-
- if handler is None:
- self.bus.log("Restoring %s handler to SIG_DFL." % signame)
- handler = _signal.SIG_DFL
- else:
- self.bus.log("Restoring %s handler %r." % (signame, handler))
-
- try:
- our_handler = _signal.signal(signum, handler)
- if our_handler is None:
- self.bus.log("Restored old %s handler %r, but our "
- "handler was not registered." %
- (signame, handler), level=30)
- except ValueError:
- self.bus.log("Unable to restore %s handler %r." %
- (signame, handler), level=40, traceback=True)
-
- def set_handler(self, signal, listener=None):
- """Subscribe a handler for the given signal (number or name).
-
- If the optional 'listener' argument is provided, it will be
- subscribed as a listener for the given signal's channel.
-
- If the given signal name or number is not available on the current
- platform, ValueError is raised.
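-
-        For example (illustrative)::
-
-            h = SignalHandler(cherrypy.engine)
-            h.set_handler('SIGUSR2', cherrypy.engine.graceful)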
- """
- if isinstance(signal, basestring):
- signum = getattr(_signal, signal, None)
- if signum is None:
- raise ValueError("No such signal: %r" % signal)
- signame = signal
- else:
- try:
- signame = self.signals[signal]
- except KeyError:
- raise ValueError("No such signal: %r" % signal)
- signum = signal
-
- prev = _signal.signal(signum, self._handle_signal)
- self._previous_handlers[signum] = prev
-
- if listener is not None:
- self.bus.log("Listening for %s." % signame)
- self.bus.subscribe(signame, listener)
-
- def _handle_signal(self, signum=None, frame=None):
- """Python signal handler (self.set_handler subscribes it for you)."""
- signame = self.signals[signum]
- self.bus.log("Caught signal %s." % signame)
- self.bus.publish(signame)
-
- def handle_SIGHUP(self):
- """Restart if daemonized, else exit."""
- if os.isatty(sys.stdin.fileno()):
- # not daemonized (may be foreground or background)
- self.bus.log("SIGHUP caught but not daemonized. Exiting.")
- self.bus.exit()
- else:
- self.bus.log("SIGHUP caught while daemonized. Restarting.")
- self.bus.restart()
-
-
-try:
- import pwd, grp
-except ImportError:
- pwd, grp = None, None
-
-
-class DropPrivileges(SimplePlugin):
- """Drop privileges. uid/gid arguments not available on Windows.
-
- Special thanks to Gavin Baker: http://antonym.org/node/100.
- """
-
- def __init__(self, bus, umask=None, uid=None, gid=None):
- SimplePlugin.__init__(self, bus)
- self.finalized = False
- self.uid = uid
- self.gid = gid
- self.umask = umask
-
- def _get_uid(self):
- return self._uid
- def _set_uid(self, val):
- if val is not None:
- if pwd is None:
- self.bus.log("pwd module not available; ignoring uid.",
- level=30)
- val = None
- elif isinstance(val, basestring):
- val = pwd.getpwnam(val)[2]
- self._uid = val
- uid = property(_get_uid, _set_uid,
- doc="The uid under which to run. Availability: Unix.")
-
- def _get_gid(self):
- return self._gid
- def _set_gid(self, val):
- if val is not None:
- if grp is None:
- self.bus.log("grp module not available; ignoring gid.",
- level=30)
- val = None
- elif isinstance(val, basestring):
- val = grp.getgrnam(val)[2]
- self._gid = val
- gid = property(_get_gid, _set_gid,
- doc="The gid under which to run. Availability: Unix.")
-
- def _get_umask(self):
- return self._umask
- def _set_umask(self, val):
- if val is not None:
- try:
- os.umask
- except AttributeError:
- self.bus.log("umask function not available; ignoring umask.",
- level=30)
- val = None
- self._umask = val
- umask = property(_get_umask, _set_umask,
- doc="""The default permission mode for newly created files and directories.
-
- Usually expressed in octal format, for example, ``0644``.
- Availability: Unix, Windows.
- """)
-
- def start(self):
- # uid/gid
- def current_ids():
- """Return the current (uid, gid) if available."""
- name, group = None, None
- if pwd:
- name = pwd.getpwuid(os.getuid())[0]
- if grp:
- group = grp.getgrgid(os.getgid())[0]
- return name, group
-
- if self.finalized:
- if not (self.uid is None and self.gid is None):
- self.bus.log('Already running as uid: %r gid: %r' %
- current_ids())
- else:
- if self.uid is None and self.gid is None:
- if pwd or grp:
- self.bus.log('uid/gid not set', level=30)
- else:
- self.bus.log('Started as uid: %r gid: %r' % current_ids())
- if self.gid is not None:
- os.setgid(self.gid)
- os.setgroups([])
- if self.uid is not None:
- os.setuid(self.uid)
- self.bus.log('Running as uid: %r gid: %r' % current_ids())
-
- # umask
- if self.finalized:
- if self.umask is not None:
- self.bus.log('umask already set to: %03o' % self.umask)
- else:
- if self.umask is None:
- self.bus.log('umask not set', level=30)
- else:
- old_umask = os.umask(self.umask)
- self.bus.log('umask old: %03o, new: %03o' %
- (old_umask, self.umask))
-
- self.finalized = True
- # This is slightly higher than the priority for server.start
- # in order to facilitate the most common use: starting on a low
- # port (which requires root) and then dropping to another user.
- start.priority = 77
-
-
-class Daemonizer(SimplePlugin):
- """Daemonize the running script.
-
- Use this with a Web Site Process Bus via::
-
- Daemonizer(bus).subscribe()
-
- When this component finishes, the process is completely decoupled from
- the parent environment. Please note that when this component is used,
- the return code from the parent process will still be 0 if a startup
- error occurs in the forked children. Errors in the initial daemonizing
- process still return proper exit codes. Therefore, if you use this
- plugin to daemonize, don't use the return code as an accurate indicator
- of whether the process fully started. In fact, that return code only
- indicates whether the process successfully finished the first fork.
- """
-
- def __init__(self, bus, stdin='/dev/null', stdout='/dev/null',
- stderr='/dev/null'):
- SimplePlugin.__init__(self, bus)
- self.stdin = stdin
- self.stdout = stdout
- self.stderr = stderr
- self.finalized = False
-
- def start(self):
- if self.finalized:
- self.bus.log('Already daemonized.')
-
- # forking has issues with threads:
- # http://www.opengroup.org/onlinepubs/000095399/functions/fork.html
- # "The general problem with making fork() work in a multi-threaded
- # world is what to do with all of the threads..."
- # So we check for active threads:
- if threading.activeCount() != 1:
- self.bus.log('There are %r active threads. '
- 'Daemonizing now may cause strange failures.' %
- threading.enumerate(), level=30)
-
- # See http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
- # (or http://www.faqs.org/faqs/unix-faq/programmer/faq/ section 1.7)
- # and http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
-
- # Finish up with the current stdout/stderr
- sys.stdout.flush()
- sys.stderr.flush()
-
- # Do first fork.
- try:
- pid = os.fork()
- if pid == 0:
- # This is the child process. Continue.
- pass
- else:
- # This is the first parent. Exit, now that we've forked.
- self.bus.log('Forking once.')
- os._exit(0)
- except OSError:
- # Python raises OSError rather than returning negative numbers.
- exc = sys.exc_info()[1]
- sys.exit("%s: fork #1 failed: (%d) %s\n"
- % (sys.argv[0], exc.errno, exc.strerror))
-
- os.setsid()
-
- # Do second fork
- try:
- pid = os.fork()
- if pid > 0:
- self.bus.log('Forking twice.')
- os._exit(0) # Exit second parent
- except OSError:
- exc = sys.exc_info()[1]
- sys.exit("%s: fork #2 failed: (%d) %s\n"
- % (sys.argv[0], exc.errno, exc.strerror))
-
- os.chdir("/")
- os.umask(0)
-
- si = open(self.stdin, "r")
- so = open(self.stdout, "a+")
- se = open(self.stderr, "a+")
-
- # os.dup2(fd, fd2) will close fd2 if necessary,
- # so we don't explicitly close stdin/out/err.
- # See http://docs.python.org/lib/os-fd-ops.html
- os.dup2(si.fileno(), sys.stdin.fileno())
- os.dup2(so.fileno(), sys.stdout.fileno())
- os.dup2(se.fileno(), sys.stderr.fileno())
-
- self.bus.log('Daemonized to PID: %s' % os.getpid())
- self.finalized = True
- start.priority = 65
-
-
-class PIDFile(SimplePlugin):
- """Maintain a PID file via a WSPBus."""
-
- def __init__(self, bus, pidfile):
- SimplePlugin.__init__(self, bus)
- self.pidfile = pidfile
- self.finalized = False
-
- def start(self):
- pid = os.getpid()
- if self.finalized:
- self.bus.log('PID %r already written to %r.' % (pid, self.pidfile))
- else:
- open(self.pidfile, "wb").write(ntob("%s" % pid, 'utf8'))
- self.bus.log('PID %r written to %r.' % (pid, self.pidfile))
- self.finalized = True
- start.priority = 70
-
- def exit(self):
- try:
- os.remove(self.pidfile)
- self.bus.log('PID file removed: %r.' % self.pidfile)
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- pass
-
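-# Typical usage (a sketch; the pidfile path is illustrative):
-#
-#     PIDFile(cherrypy.engine, '/var/run/myapp.pid').subscribe()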
-
-class PerpetualTimer(threading._Timer):
- """A responsive subclass of threading._Timer whose run() method repeats.
-
- Use this timer only when you really need a very interruptible timer;
- this checks its 'finished' condition up to 20 times a second, which can
- result in pretty high CPU usage.
- """
-
- def __init__(self, *args, **kwargs):
- # Accept an optional 'bus' keyword argument, since run() logs
- # errors through self.bus; threading._Timer provides no such
- # attribute itself.
- self.bus = kwargs.pop('bus', None)
- threading._Timer.__init__(self, *args, **kwargs)
-
- def run(self):
- while True:
- self.finished.wait(self.interval)
- if self.finished.isSet():
- return
- try:
- self.function(*self.args, **self.kwargs)
- except Exception:
- self.bus.log("Error in perpetual timer thread function %r." %
- self.function, level=40, traceback=True)
- # Quit on first error to avoid massive logs.
- raise
-
-
-class BackgroundTask(threading.Thread):
- """A subclass of threading.Thread whose run() method repeats.
-
- Use this class for most repeating tasks. It uses time.sleep() to wait
- for each interval, which isn't very responsive; that is, even if you call
- self.cancel(), you'll have to wait until the sleep() call finishes before
- the thread stops. To compensate, it defaults to being daemonic, which means
- it won't delay stopping the whole process.
- """
-
- def __init__(self, interval, function, args=[], kwargs={}, bus=None):
- threading.Thread.__init__(self)
- self.interval = interval
- self.function = function
- self.args = args
- self.kwargs = kwargs
- self.running = False
- self.bus = bus
-
- def cancel(self):
- self.running = False
-
- def run(self):
- self.running = True
- while self.running:
- time.sleep(self.interval)
- if not self.running:
- return
- try:
- self.function(*self.args, **self.kwargs)
- except Exception:
- if self.bus:
- self.bus.log("Error in background task thread function %r."
- % self.function, level=40, traceback=True)
- # Quit on first error to avoid massive logs.
- raise
-
- def _set_daemon(self):
- return True
-
-
-class Monitor(SimplePlugin):
- """WSPBus listener to periodically run a callback in its own thread."""
-
- callback = None
- """The function to call at intervals."""
-
- frequency = 60
- """The time in seconds between callback runs."""
-
- thread = None
- """A :class:`BackgroundTask` thread."""
-
- def __init__(self, bus, callback, frequency=60, name=None):
- SimplePlugin.__init__(self, bus)
- self.callback = callback
- self.frequency = frequency
- self.thread = None
- self.name = name
-
- def start(self):
- """Start our callback in its own background thread."""
- if self.frequency > 0:
- threadname = self.name or self.__class__.__name__
- if self.thread is None:
- self.thread = BackgroundTask(self.frequency, self.callback,
- bus = self.bus)
- self.thread.setName(threadname)
- self.thread.start()
- self.bus.log("Started monitor thread %r." % threadname)
- else:
- self.bus.log("Monitor thread %r already started." % threadname)
- start.priority = 70
-
- def stop(self):
- """Stop our callback's background task thread."""
- if self.thread is None:
- self.bus.log("No thread running for %s." % self.name or self.__class__.__name__)
- else:
- if self.thread is not threading.currentThread():
- name = self.thread.getName()
- self.thread.cancel()
- if not get_daemon(self.thread):
- self.bus.log("Joining %r" % name)
- self.thread.join()
- self.bus.log("Stopped thread %r." % name)
- self.thread = None
-
- def graceful(self):
- """Stop the callback's background task thread and restart it."""
- self.stop()
- self.start()
-
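-# Example usage (illustrative): run a callback every five minutes in its
-# own background thread.
-#
-#     def heartbeat():
-#         cherrypy.engine.log('Still alive')
-#
-#     Monitor(cherrypy.engine, heartbeat, frequency=300,
-#             name='Heartbeat').subscribe()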
-
-class Autoreloader(Monitor):
- """Monitor which re-executes the process when files change.
-
- This :ref:`plugin` restarts the process (via :func:`os.execv`)
- if any of the files it monitors changes (or is deleted). By default, the
- autoreloader monitors all imported modules; you can add to the
- set by adding to ``autoreload.files``::
-
- cherrypy.engine.autoreload.files.add(myFile)
-
- If there are imported files you do *not* wish to monitor, you can adjust the
- ``match`` attribute, a regular expression. For example, to stop monitoring
- cherrypy itself::
-
- cherrypy.engine.autoreload.match = r'^(?!cherrypy).+'
-
- Like all :class:`Monitor` plugins,
- the autoreload plugin takes a ``frequency`` argument. The default is
- 1 second; that is, the autoreloader will examine files once each second.
- """
-
- files = None
- """The set of files to poll for modifications."""
-
- frequency = 1
- """The interval in seconds at which to poll for modified files."""
-
- match = '.*'
- """A regular expression by which to match filenames."""
-
- def __init__(self, bus, frequency=1, match='.*'):
- self.mtimes = {}
- self.files = set()
- self.match = match
- Monitor.__init__(self, bus, self.run, frequency)
-
- def start(self):
- """Start our own background task thread for self.run."""
- if self.thread is None:
- self.mtimes = {}
- Monitor.start(self)
- start.priority = 70
-
- def sysfiles(self):
- """Return a Set of sys.modules filenames to monitor."""
- files = set()
- for k, m in sys.modules.items():
- if re.match(self.match, k):
- if hasattr(m, '__loader__') and hasattr(m.__loader__, 'archive'):
- f = m.__loader__.archive
- else:
- f = getattr(m, '__file__', None)
- if f is not None and not os.path.isabs(f):
- # ensure absolute paths so an os.chdir() in the app doesn't break us
- f = os.path.normpath(os.path.join(_module__file__base, f))
- files.add(f)
- return files
-
- def run(self):
- """Reload the process if registered files have been modified."""
- for filename in self.sysfiles() | self.files:
- if filename:
- if filename.endswith('.pyc'):
- filename = filename[:-1]
-
- oldtime = self.mtimes.get(filename, 0)
- if oldtime is None:
- # Module with no .py file. Skip it.
- continue
-
- try:
- mtime = os.stat(filename).st_mtime
- except OSError:
- # Either a module with no .py file, or it's been deleted.
- mtime = None
-
- if filename not in self.mtimes:
- # If a module has no .py file, this will be None.
- self.mtimes[filename] = mtime
- else:
- if mtime is None or mtime > oldtime:
- # The file has been deleted or modified.
- self.bus.log("Restarting because %s changed." % filename)
- self.thread.cancel()
- self.bus.log("Stopped thread %r." % self.thread.getName())
- self.bus.restart()
- return
-
-
-class ThreadManager(SimplePlugin):
- """Manager for HTTP request threads.
-
- If you have control over thread creation and destruction, publish to
- the 'acquire_thread' and 'release_thread' channels (for each thread).
- This will register/unregister the current thread and publish to
- 'start_thread' and 'stop_thread' listeners in the bus as needed.
-
- If threads are created and destroyed by code you do not control
- (e.g., Apache), then, at the beginning of every HTTP request,
- publish to 'acquire_thread' only. You should not publish to
- 'release_thread' in this case, since you do not know whether
- the thread will be re-used or not. The bus will call
- 'stop_thread' listeners for you when it stops.
- """
-
- threads = None
- """A map of {thread ident: index number} pairs."""
-
- def __init__(self, bus):
- self.threads = {}
- SimplePlugin.__init__(self, bus)
- self.bus.listeners.setdefault('acquire_thread', set())
- self.bus.listeners.setdefault('start_thread', set())
- self.bus.listeners.setdefault('release_thread', set())
- self.bus.listeners.setdefault('stop_thread', set())
-
- def acquire_thread(self):
- """Run 'start_thread' listeners for the current thread.
-
- If the current thread has already been seen, any 'start_thread'
- listeners will not be run again.
- """
- thread_ident = get_thread_ident()
- if thread_ident not in self.threads:
- # We can't just use get_ident as the thread ID
- # because some platforms reuse thread ID's.
- i = len(self.threads) + 1
- self.threads[thread_ident] = i
- self.bus.publish('start_thread', i)
-
- def release_thread(self):
- """Release the current thread and run 'stop_thread' listeners."""
- thread_ident = get_thread_ident()
- i = self.threads.pop(thread_ident, None)
- if i is not None:
- self.bus.publish('stop_thread', i)
-
- def stop(self):
- """Release all threads and run all 'stop_thread' listeners."""
- for thread_ident, i in self.threads.items():
- self.bus.publish('stop_thread', i)
- self.threads.clear()
- graceful = stop
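-
-# An illustrative sketch of the protocol described in the class docstring,
-# for code that owns its own worker threads (function names hypothetical):
-#
-#     def worker(bus):
-#         bus.publish('acquire_thread')      # registers; fires 'start_thread'
-#         try:
-#             pass  # ... handle requests ...
-#         finally:
-#             bus.publish('release_thread')  # fires 'stop_thread'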
-
diff --git a/python-packages/cherrypy/process/servers.py b/python-packages/cherrypy/process/servers.py
deleted file mode 100644
index fa714d65e8..0000000000
--- a/python-packages/cherrypy/process/servers.py
+++ /dev/null
@@ -1,427 +0,0 @@
-"""
-Starting in CherryPy 3.1, cherrypy.server is implemented as an
-:ref:`Engine Plugin`. It's an instance of
-:class:`cherrypy._cpserver.Server`, which is a subclass of
-:class:`cherrypy.process.servers.ServerAdapter`. The ``ServerAdapter`` class
-is designed to control other servers, as well.
-
-Multiple servers/ports
-======================
-
-If you need to start more than one HTTP server (to serve on multiple ports, or
-protocols, etc.), you can manually register each one and then start them all
-with engine.start::
-
- s1 = ServerAdapter(cherrypy.engine, MyWSGIServer(host='0.0.0.0', port=80))
- s2 = ServerAdapter(cherrypy.engine, another.HTTPServer(host='127.0.0.1', SSL=True))
- s1.subscribe()
- s2.subscribe()
- cherrypy.engine.start()
-
-.. index:: SCGI
-
-FastCGI/SCGI
-============
-
-There are also Flup\ **F**\ CGIServer and Flup\ **S**\ CGIServer classes in
-:mod:`cherrypy.process.servers`. To start an fcgi server, for example,
-wrap an instance of it in a ServerAdapter::
-
- addr = ('0.0.0.0', 4000)
- f = servers.FlupFCGIServer(application=cherrypy.tree, bindAddress=addr)
- s = servers.ServerAdapter(cherrypy.engine, httpserver=f, bind_addr=addr)
- s.subscribe()
-
-The :doc:`cherryd` startup script will do the above for
-you via its `-f` flag.
-Note that you need to download and install flup yourself,
-whether you use ``cherryd`` or not.
-
-.. _fastcgi:
-.. index:: FastCGI
-
-FastCGI
--------
-
-A very simple setup lets your CherryPy application run with FastCGI.
-You just need the flup library,
-plus a running Apache server (with ``mod_fastcgi``) or lighttpd server.
-
-CherryPy code
-^^^^^^^^^^^^^
-
-hello.py::
-
- #!/usr/bin/python
- import cherrypy
-
- class HelloWorld:
- \"""Sample request handler class.\"""
- def index(self):
- return "Hello world!"
- index.exposed = True
-
- cherrypy.tree.mount(HelloWorld())
- # CherryPy autoreload must be disabled for the flup server to work
- cherrypy.config.update({'engine.autoreload_on':False})
-
-Then run :doc:`/deployguide/cherryd` with the '-f' arg::
-
-    cherryd -c <myconfig> -d -f -i hello.py
-
-Apache
-^^^^^^
-
-At the top level in httpd.conf::
-
- FastCgiIpcDir /tmp
- FastCgiServer /path/to/cherry.fcgi -idle-timeout 120 -processes 4
-
-And inside the relevant VirtualHost section::
-
- # FastCGI config
- AddHandler fastcgi-script .fcgi
- ScriptAliasMatch (.*$) /path/to/cherry.fcgi$1
-
-Lighttpd
-^^^^^^^^
-
-For `Lighttpd <http://www.lighttpd.net/>`_ you can follow these
-instructions. Within ``lighttpd.conf`` make sure ``mod_fastcgi`` is
-active within ``server.modules``. Then, within your ``$HTTP["host"]``
-directive, configure your fastcgi script like the following::
-
- $HTTP["url"] =~ "" {
- fastcgi.server = (
- "/" => (
- "script.fcgi" => (
- "bin-path" => "/path/to/your/script.fcgi",
- "socket" => "/tmp/script.sock",
- "check-local" => "disable",
- "disable-time" => 1,
- "min-procs" => 1,
- "max-procs" => 1, # adjust as needed
- ),
- ),
- )
- } # end of $HTTP["url"] =~ "^/"
-
-Please see the Lighttpd FastCGI documentation for an explanation
-of the possible configuration options.
-"""
-
-import sys
-import time
-
-
-class ServerAdapter(object):
- """Adapter for an HTTP server.
-
- If you need to start more than one HTTP server (to serve on multiple
- ports, or protocols, etc.), you can manually register each one and then
- start them all with bus.start:
-
- s1 = ServerAdapter(bus, MyWSGIServer(host='0.0.0.0', port=80))
- s2 = ServerAdapter(bus, another.HTTPServer(host='127.0.0.1', SSL=True))
- s1.subscribe()
- s2.subscribe()
- bus.start()
- """
-
- def __init__(self, bus, httpserver=None, bind_addr=None):
- self.bus = bus
- self.httpserver = httpserver
- self.bind_addr = bind_addr
- self.interrupt = None
- self.running = False
-
- def subscribe(self):
- self.bus.subscribe('start', self.start)
- self.bus.subscribe('stop', self.stop)
-
- def unsubscribe(self):
- self.bus.unsubscribe('start', self.start)
- self.bus.unsubscribe('stop', self.stop)
-
- def start(self):
- """Start the HTTP server."""
- if self.bind_addr is None:
- on_what = "unknown interface (dynamic?)"
- elif isinstance(self.bind_addr, tuple):
- host, port = self.bind_addr
- on_what = "%s:%s" % (host, port)
- else:
- on_what = "socket file: %s" % self.bind_addr
-
- if self.running:
- self.bus.log("Already serving on %s" % on_what)
- return
-
- self.interrupt = None
- if not self.httpserver:
- raise ValueError("No HTTP server has been created.")
-
- # Start the httpserver in a new thread.
- if isinstance(self.bind_addr, tuple):
- wait_for_free_port(*self.bind_addr)
-
- import threading
- t = threading.Thread(target=self._start_http_thread)
- t.setName("HTTPServer " + t.getName())
- t.start()
-
- self.wait()
- self.running = True
- self.bus.log("Serving on %s" % on_what)
- start.priority = 75
-
- def _start_http_thread(self):
- """HTTP servers MUST be running in new threads, so that the
- main thread persists to receive KeyboardInterrupt's. If an
- exception is raised in the httpserver's thread then it's
- trapped here, and the bus (and therefore our httpserver)
- are shut down.
- """
- try:
- self.httpserver.start()
- except KeyboardInterrupt:
- self.bus.log(" hit: shutting down HTTP server")
- self.interrupt = sys.exc_info()[1]
- self.bus.exit()
- except SystemExit:
- self.bus.log("SystemExit raised: shutting down HTTP server")
- self.interrupt = sys.exc_info()[1]
- self.bus.exit()
- raise
- except:
- self.interrupt = sys.exc_info()[1]
- self.bus.log("Error in HTTP server: shutting down",
- traceback=True, level=40)
- self.bus.exit()
- raise
-
- def wait(self):
- """Wait until the HTTP server is ready to receive requests."""
- while not getattr(self.httpserver, "ready", False):
- if self.interrupt:
- raise self.interrupt
- time.sleep(.1)
-
- # Wait for port to be occupied
- if isinstance(self.bind_addr, tuple):
- host, port = self.bind_addr
- wait_for_occupied_port(host, port)
-
- def stop(self):
- """Stop the HTTP server."""
- if self.running:
- # stop() MUST block until the server is *truly* stopped.
- self.httpserver.stop()
- # Wait for the socket to be truly freed.
- if isinstance(self.bind_addr, tuple):
- wait_for_free_port(*self.bind_addr)
- self.running = False
- self.bus.log("HTTP Server %s shut down" % self.httpserver)
- else:
- self.bus.log("HTTP Server %s already shut down" % self.httpserver)
- stop.priority = 25
-
- def restart(self):
- """Restart the HTTP server."""
- self.stop()
- self.start()
-
-
-class FlupCGIServer(object):
- """Adapter for a flup.server.cgi.WSGIServer."""
-
- def __init__(self, *args, **kwargs):
- self.args = args
- self.kwargs = kwargs
- self.ready = False
-
- def start(self):
- """Start the CGI server."""
- # We have to instantiate the server class here because its __init__
- # starts a threadpool. If we do it too early, daemonize won't work.
- from flup.server.cgi import WSGIServer
-
- self.cgiserver = WSGIServer(*self.args, **self.kwargs)
- self.ready = True
- self.cgiserver.run()
-
- def stop(self):
- """Stop the HTTP server."""
- self.ready = False
-
-
-class FlupFCGIServer(object):
- """Adapter for a flup.server.fcgi.WSGIServer."""
-
- def __init__(self, *args, **kwargs):
- if kwargs.get('bindAddress', None) is None:
- import socket
- if not hasattr(socket, 'fromfd'):
- raise ValueError(
- 'Dynamic FCGI server not available on this platform. '
- 'You must use a static or external one by providing a '
- 'legal bindAddress.')
- self.args = args
- self.kwargs = kwargs
- self.ready = False
-
- def start(self):
- """Start the FCGI server."""
- # We have to instantiate the server class here because its __init__
- # starts a threadpool. If we do it too early, daemonize won't work.
- from flup.server.fcgi import WSGIServer
- self.fcgiserver = WSGIServer(*self.args, **self.kwargs)
- # TODO: report this bug upstream to flup.
- # If we don't set _oldSIGs on Windows, we get:
- # File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
- # line 108, in run
- # self._restoreSignalHandlers()
- # File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
- # line 156, in _restoreSignalHandlers
- # for signum,handler in self._oldSIGs:
- # AttributeError: 'WSGIServer' object has no attribute '_oldSIGs'
- self.fcgiserver._installSignalHandlers = lambda: None
- self.fcgiserver._oldSIGs = []
- self.ready = True
- self.fcgiserver.run()
-
- def stop(self):
- """Stop the HTTP server."""
- # Forcibly stop the fcgi server main event loop.
- self.fcgiserver._keepGoing = False
- # Force all worker threads to die off.
- self.fcgiserver._threadPool.maxSpare = self.fcgiserver._threadPool._idleCount
- self.ready = False
-
-
-class FlupSCGIServer(object):
- """Adapter for a flup.server.scgi.WSGIServer."""
-
- def __init__(self, *args, **kwargs):
- self.args = args
- self.kwargs = kwargs
- self.ready = False
-
- def start(self):
- """Start the SCGI server."""
- # We have to instantiate the server class here because its __init__
- # starts a threadpool. If we do it too early, daemonize won't work.
- from flup.server.scgi import WSGIServer
- self.scgiserver = WSGIServer(*self.args, **self.kwargs)
- # TODO: report this bug upstream to flup.
- # If we don't set _oldSIGs on Windows, we get:
- # File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
- # line 108, in run
- # self._restoreSignalHandlers()
- # File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
- # line 156, in _restoreSignalHandlers
- # for signum,handler in self._oldSIGs:
- # AttributeError: 'WSGIServer' object has no attribute '_oldSIGs'
- self.scgiserver._installSignalHandlers = lambda: None
- self.scgiserver._oldSIGs = []
- self.ready = True
- self.scgiserver.run()
-
- def stop(self):
- """Stop the HTTP server."""
- self.ready = False
- # Forcibly stop the scgi server main event loop.
- self.scgiserver._keepGoing = False
- # Force all worker threads to die off.
- self.scgiserver._threadPool.maxSpare = 0
-
-
-def client_host(server_host):
- """Return the host on which a client can connect to the given listener."""
- if server_host == '0.0.0.0':
- # 0.0.0.0 is INADDR_ANY, which should answer on localhost.
- return '127.0.0.1'
- if server_host in ('::', '::0', '::0.0.0.0'):
- # :: is IN6ADDR_ANY, which should answer on localhost.
- # ::0 and ::0.0.0.0 are non-canonical but common ways to write IN6ADDR_ANY.
- return '::1'
- return server_host
-
-def check_port(host, port, timeout=1.0):
- """Raise an error if the given port is not free on the given host."""
- if not host:
- raise ValueError("Host values of '' or None are not allowed.")
- host = client_host(host)
- port = int(port)
-
- import socket
-
- # AF_INET or AF_INET6 socket
- # Get the correct address family for our host (allows IPv6 addresses)
- try:
- info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
- socket.SOCK_STREAM)
- except socket.gaierror:
- if ':' in host:
- info = [(socket.AF_INET6, socket.SOCK_STREAM, 0, "", (host, port, 0, 0))]
- else:
- info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", (host, port))]
-
- for res in info:
- af, socktype, proto, canonname, sa = res
- s = None
- try:
- s = socket.socket(af, socktype, proto)
- # See http://groups.google.com/group/cherrypy-users/
- # browse_frm/thread/bbfe5eb39c904fe0
- s.settimeout(timeout)
- s.connect((host, port))
- s.close()
- raise IOError("Port %s is in use on %s; perhaps the previous "
- "httpserver did not shut down properly." %
- (repr(port), repr(host)))
- except socket.error:
- if s:
- s.close()
-
-
-# Feel free to increase these defaults on slow systems:
-free_port_timeout = 0.1
-occupied_port_timeout = 1.0
-
-def wait_for_free_port(host, port, timeout=None):
- """Wait for the specified port to become free (drop requests)."""
- if not host:
- raise ValueError("Host values of '' or None are not allowed.")
- if timeout is None:
- timeout = free_port_timeout
-
- for trial in range(50):
- try:
- # we are expecting a free port, so reduce the timeout
- check_port(host, port, timeout=timeout)
- except IOError:
- # Give the old server thread time to free the port.
- time.sleep(timeout)
- else:
- return
-
- raise IOError("Port %r not free on %r" % (port, host))
-
-def wait_for_occupied_port(host, port, timeout=None):
- """Wait for the specified port to become active (receive requests)."""
- if not host:
- raise ValueError("Host values of '' or None are not allowed.")
- if timeout is None:
- timeout = occupied_port_timeout
-
- for trial in range(50):
- try:
- check_port(host, port, timeout=timeout)
- except IOError:
- return
- else:
- time.sleep(timeout)
-
- raise IOError("Port %r not bound on %r" % (port, host))
diff --git a/python-packages/cherrypy/process/win32.py b/python-packages/cherrypy/process/win32.py
deleted file mode 100644
index 83f99a5d46..0000000000
--- a/python-packages/cherrypy/process/win32.py
+++ /dev/null
@@ -1,174 +0,0 @@
-"""Windows service. Requires pywin32."""
-
-import os
-import win32api
-import win32con
-import win32event
-import win32service
-import win32serviceutil
-
-from cherrypy.process import wspbus, plugins
-
-
-class ConsoleCtrlHandler(plugins.SimplePlugin):
- """A WSPBus plugin for handling Win32 console events (like Ctrl-C)."""
-
- def __init__(self, bus):
- self.is_set = False
- plugins.SimplePlugin.__init__(self, bus)
-
- def start(self):
- if self.is_set:
- self.bus.log('Handler for console events already set.', level=40)
- return
-
- result = win32api.SetConsoleCtrlHandler(self.handle, 1)
- if result == 0:
- self.bus.log('Could not SetConsoleCtrlHandler (error %r)' %
- win32api.GetLastError(), level=40)
- else:
- self.bus.log('Set handler for console events.', level=40)
- self.is_set = True
-
- def stop(self):
- if not self.is_set:
- self.bus.log('Handler for console events already off.', level=40)
- return
-
- try:
- result = win32api.SetConsoleCtrlHandler(self.handle, 0)
- except ValueError:
- # "ValueError: The object has not been registered"
- result = 1
-
- if result == 0:
- self.bus.log('Could not remove SetConsoleCtrlHandler (error %r)' %
- win32api.GetLastError(), level=40)
- else:
- self.bus.log('Removed handler for console events.', level=40)
- self.is_set = False
-
- def handle(self, event):
- """Handle console control events (like Ctrl-C)."""
- if event in (win32con.CTRL_C_EVENT, win32con.CTRL_LOGOFF_EVENT,
- win32con.CTRL_BREAK_EVENT, win32con.CTRL_SHUTDOWN_EVENT,
- win32con.CTRL_CLOSE_EVENT):
- self.bus.log('Console event %s: shutting down bus' % event)
-
- # Remove self immediately so repeated Ctrl-C doesn't re-call it.
- try:
- self.stop()
- except ValueError:
- pass
-
- self.bus.exit()
- # 'First to return True stops the calls'
- return 1
- return 0
-
-
-class Win32Bus(wspbus.Bus):
- """A Web Site Process Bus implementation for Win32.
-
- Instead of time.sleep, this bus blocks using native win32event objects.
- """
-
- def __init__(self):
- self.events = {}
- wspbus.Bus.__init__(self)
-
- def _get_state_event(self, state):
- """Return a win32event for the given state (creating it if needed)."""
- try:
- return self.events[state]
- except KeyError:
- event = win32event.CreateEvent(None, 0, 0,
- "WSPBus %s Event (pid=%r)" %
- (state.name, os.getpid()))
- self.events[state] = event
- return event
-
- def _get_state(self):
- return self._state
- def _set_state(self, value):
- self._state = value
- event = self._get_state_event(value)
- win32event.PulseEvent(event)
- state = property(_get_state, _set_state)
-
- def wait(self, state, interval=0.1, channel=None):
- """Wait for the given state(s), KeyboardInterrupt or SystemExit.
-
- Since this class uses native win32event objects, the interval
- argument is ignored.
- """
- if isinstance(state, (tuple, list)):
- # Don't wait for an event that beat us to the punch ;)
- if self.state not in state:
- events = tuple([self._get_state_event(s) for s in state])
- win32event.WaitForMultipleObjects(events, 0, win32event.INFINITE)
- else:
- # Don't wait for an event that beat us to the punch ;)
- if self.state != state:
- event = self._get_state_event(state)
- win32event.WaitForSingleObject(event, win32event.INFINITE)
-
-
-class _ControlCodes(dict):
- """Control codes used to "signal" a service via ControlService.
-
- User-defined control codes are in the range 128-255. We generally use
- the standard Python value for the Linux signal and add 128. Example:
-
- >>> signal.SIGUSR1
- 10
-    >>> control_codes['graceful'] = 128 + 10
- """
-
- def key_for(self, obj):
- """For the given value, return its corresponding key."""
- for key, val in self.items():
- if val is obj:
- return key
- raise ValueError("The given object could not be found: %r" % obj)
-
-control_codes = _ControlCodes({'graceful': 138})
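-
-# An illustrative round trip, using signal_child defined just below
-# (the service name is hypothetical):
-#
-#     control_codes['graceful']         # -> 138
-#     control_codes.key_for(138)        # -> 'graceful'
-#     signal_child('MyPyWebService', 'graceful')  # sends control code 138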
-
-
-def signal_child(service, command):
- if command == 'stop':
- win32serviceutil.StopService(service)
- elif command == 'restart':
- win32serviceutil.RestartService(service)
- else:
- win32serviceutil.ControlService(service, control_codes[command])
-
-
-class PyWebService(win32serviceutil.ServiceFramework):
- """Python Web Service."""
-
- _svc_name_ = "Python Web Service"
- _svc_display_name_ = "Python Web Service"
- _svc_deps_ = None # sequence of service names on which this depends
- _exe_name_ = "pywebsvc"
- _exe_args_ = None # Default to no arguments
-
- # Only exists on Windows 2000 or later, ignored on windows NT
- _svc_description_ = "Python Web Service"
-
- def SvcDoRun(self):
- from cherrypy import process
- process.bus.start()
- process.bus.block()
-
- def SvcStop(self):
- from cherrypy import process
- self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
- process.bus.exit()
-
-    def SvcOther(self, control):
-        from cherrypy import process
-        process.bus.publish(control_codes.key_for(control))
-
-
-if __name__ == '__main__':
- win32serviceutil.HandleCommandLine(PyWebService)
diff --git a/python-packages/cherrypy/process/wspbus.py b/python-packages/cherrypy/process/wspbus.py
deleted file mode 100644
index 6ef768dcbb..0000000000
--- a/python-packages/cherrypy/process/wspbus.py
+++ /dev/null
@@ -1,432 +0,0 @@
-"""An implementation of the Web Site Process Bus.
-
-This module is completely standalone, depending only on the stdlib.
-
-Web Site Process Bus
---------------------
-
-A Bus object is used to contain and manage site-wide behavior:
-daemonization, HTTP server start/stop, process reload, signal handling,
-drop privileges, PID file management, logging for all of these,
-and many more.
-
-In addition, a Bus object provides a place for each web framework
-to register code that runs in response to site-wide events (like
-process start and stop), or which controls or otherwise interacts with
-the site-wide components mentioned above. For example, a framework which
-uses file-based templates would add known template filenames to an
-autoreload component.
-
-Ideally, a Bus object will be flexible enough to be useful in a variety
-of invocation scenarios:
-
- 1. The deployer starts a site from the command line via a
- framework-neutral deployment script; applications from multiple frameworks
- are mixed in a single site. Command-line arguments and configuration
- files are used to define site-wide components such as the HTTP server,
- WSGI component graph, autoreload behavior, signal handling, etc.
- 2. The deployer starts a site via some other process, such as Apache;
- applications from multiple frameworks are mixed in a single site.
- Autoreload and signal handling (from Python at least) are disabled.
- 3. The deployer starts a site via a framework-specific mechanism;
- for example, when running tests, exploring tutorials, or deploying
- single applications from a single framework. The framework controls
- which site-wide components are enabled as it sees fit.
-
-The Bus object in this package uses topic-based publish-subscribe
-messaging to accomplish all this. A few topic channels are built in
-('start', 'stop', 'exit', 'graceful', 'log', and 'main'). Frameworks and
-site containers are free to define their own. If a message is sent to a
-channel that has not been defined or has no listeners, there is no effect.
-
-In general, there should only ever be a single Bus object per process.
-Frameworks and site containers share a single Bus object by publishing
-messages and subscribing listeners.
-
-The Bus object works as a finite state machine which models the current
-state of the process. Bus methods move it from one state to another;
-those methods then publish to subscribed listeners on the channel for
-the new state::
-
- O
- |
- V
- STOPPING --> STOPPED --> EXITING -> X
- A A |
- | \___ |
- | \ |
- | V V
- STARTED <-- STARTING
-
-"""
-
-import atexit
-import os
-import sys
-import threading
-import time
-import traceback as _traceback
-import warnings
-
-from cherrypy._cpcompat import set
-
-# Here I save the value of os.getcwd(), which, if I am imported early enough,
-# will be the directory from which the startup script was run. This is needed
-# by _do_execv(), to change back to the original directory before execv()ing a
-# new process. This is a defense against the application having changed the
-# current working directory (which could make sys.executable "not found" if
-# sys.executable is a relative-path, and/or cause other problems).
-_startup_cwd = os.getcwd()
-
-class ChannelFailures(Exception):
- """Exception raised when errors occur in a listener during Bus.publish()."""
- delimiter = '\n'
-
- def __init__(self, *args, **kwargs):
- # Don't use 'super' here; Exceptions are old-style in Py2.4
- # See http://www.cherrypy.org/ticket/959
- Exception.__init__(self, *args, **kwargs)
- self._exceptions = list()
-
- def handle_exception(self):
- """Append the current exception to self."""
- self._exceptions.append(sys.exc_info()[1])
-
- def get_instances(self):
- """Return a list of seen exception instances."""
- return self._exceptions[:]
-
- def __str__(self):
- exception_strings = map(repr, self.get_instances())
- return self.delimiter.join(exception_strings)
-
- __repr__ = __str__
-
- def __bool__(self):
- return bool(self._exceptions)
- __nonzero__ = __bool__
-
-# Use a flag to indicate the state of the bus.
-class _StateEnum(object):
- class State(object):
- name = None
- def __repr__(self):
- return "states.%s" % self.name
-
- def __setattr__(self, key, value):
- if isinstance(value, self.State):
- value.name = key
- object.__setattr__(self, key, value)
-states = _StateEnum()
-states.STOPPED = states.State()
-states.STARTING = states.State()
-states.STARTED = states.State()
-states.STOPPING = states.State()
-states.EXITING = states.State()
-
-
-try:
- import fcntl
-except ImportError:
- max_files = 0
-else:
- try:
- max_files = os.sysconf('SC_OPEN_MAX')
- except AttributeError:
- max_files = 1024
-
-
-class Bus(object):
- """Process state-machine and messenger for HTTP site deployment.
-
- All listeners for a given channel are guaranteed to be called even
- if others at the same channel fail. Each failure is logged, but
- execution proceeds on to the next listener. The only way to stop all
- processing from inside a listener is to raise SystemExit and stop the
- whole server.
- """
-
- states = states
- state = states.STOPPED
- execv = False
- max_cloexec_files = max_files
-
- def __init__(self):
- self.execv = False
- self.state = states.STOPPED
- self.listeners = dict(
- [(channel, set()) for channel
- in ('start', 'stop', 'exit', 'graceful', 'log', 'main')])
- self._priorities = {}
-
- def subscribe(self, channel, callback, priority=None):
- """Add the given callback at the given channel (if not present)."""
- if channel not in self.listeners:
- self.listeners[channel] = set()
- self.listeners[channel].add(callback)
-
- if priority is None:
- priority = getattr(callback, 'priority', 50)
- self._priorities[(channel, callback)] = priority
-
- def unsubscribe(self, channel, callback):
- """Discard the given callback (if present)."""
- listeners = self.listeners.get(channel)
- if listeners and callback in listeners:
- listeners.discard(callback)
- del self._priorities[(channel, callback)]
-
- def publish(self, channel, *args, **kwargs):
- """Return output of all subscribers for the given channel."""
- if channel not in self.listeners:
- return []
-
- exc = ChannelFailures()
- output = []
-
- items = [(self._priorities[(channel, listener)], listener)
- for listener in self.listeners[channel]]
- try:
- items.sort(key=lambda item: item[0])
- except TypeError:
- # Python 2.3 had no 'key' arg, but that doesn't matter
- # since it could sort dissimilar types just fine.
- items.sort()
- for priority, listener in items:
- try:
- output.append(listener(*args, **kwargs))
- except KeyboardInterrupt:
- raise
- except SystemExit:
- e = sys.exc_info()[1]
- # If we have previous errors ensure the exit code is non-zero
- if exc and e.code == 0:
- e.code = 1
- raise
- except:
- exc.handle_exception()
- if channel == 'log':
- # Assume any further messages to 'log' will fail.
- pass
- else:
- self.log("Error in %r listener %r" % (channel, listener),
- level=40, traceback=True)
- if exc:
- raise exc
- return output
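-
-    # An illustrative dispatch order for subscribe()/publish() above
-    # (channel and listeners hypothetical); listeners run in ascending
-    # priority order:
-    #
-    #     bus = Bus()
-    #     bus.subscribe('my-channel', lambda: 'late', priority=80)
-    #     bus.subscribe('my-channel', lambda: 'early', priority=10)
-    #     bus.publish('my-channel')  # -> ['early', 'late']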
-
- def _clean_exit(self):
- """An atexit handler which asserts the Bus is not running."""
- if self.state != states.EXITING:
- warnings.warn(
- "The main thread is exiting, but the Bus is in the %r state; "
- "shutting it down automatically now. You must either call "
- "bus.block() after start(), or call bus.exit() before the "
- "main thread exits." % self.state, RuntimeWarning)
- self.exit()
-
- def start(self):
- """Start all services."""
- atexit.register(self._clean_exit)
-
- self.state = states.STARTING
- self.log('Bus STARTING')
- try:
- self.publish('start')
- self.state = states.STARTED
- self.log('Bus STARTED')
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- self.log("Shutting down due to error in start listener:",
- level=40, traceback=True)
- e_info = sys.exc_info()[1]
- try:
- self.exit()
- except:
- # Any stop/exit errors will be logged inside publish().
- pass
- # Re-raise the original error
- raise e_info
-
- def exit(self):
- """Stop all services and prepare to exit the process."""
- exitstate = self.state
- try:
- self.stop()
-
- self.state = states.EXITING
- self.log('Bus EXITING')
- self.publish('exit')
- # This isn't strictly necessary, but it's better than seeing
- # "Waiting for child threads to terminate..." and then nothing.
- self.log('Bus EXITED')
- except:
- # This method is often called asynchronously (whether thread,
- # signal handler, console handler, or atexit handler), so we
- # can't just let exceptions propagate out unhandled.
- # Assume it's been logged and just die.
- os._exit(70) # EX_SOFTWARE
-
- if exitstate == states.STARTING:
- # exit() was called before start() finished, possibly due to
- # Ctrl-C because a start listener got stuck. In this case,
- # we could get stuck in a loop where Ctrl-C never exits the
-            # process, so we just call os._exit here.
- os._exit(70) # EX_SOFTWARE
-
- def restart(self):
- """Restart the process (may close connections).
-
- This method does not restart the process from the calling thread;
- instead, it stops the bus and asks the main thread to call execv.
- """
- self.execv = True
- self.exit()
-
- def graceful(self):
- """Advise all services to reload."""
- self.log('Bus graceful')
- self.publish('graceful')
-
- def block(self, interval=0.1):
- """Wait for the EXITING state, KeyboardInterrupt or SystemExit.
-
- This function is intended to be called only by the main thread.
- After waiting for the EXITING state, it also waits for all threads
- to terminate, and then calls os.execv if self.execv is True. This
- design allows another thread to call bus.restart, yet have the main
- thread perform the actual execv call (required on some platforms).
- """
- try:
- self.wait(states.EXITING, interval=interval, channel='main')
- except (KeyboardInterrupt, IOError):
- # The time.sleep call might raise
- # "IOError: [Errno 4] Interrupted function call" on KBInt.
- self.log('Keyboard Interrupt: shutting down bus')
- self.exit()
- except SystemExit:
- self.log('SystemExit raised: shutting down bus')
- self.exit()
- raise
-
- # Waiting for ALL child threads to finish is necessary on OS X.
- # See http://www.cherrypy.org/ticket/581.
- # It's also good to let them all shut down before allowing
- # the main thread to call atexit handlers.
- # See http://www.cherrypy.org/ticket/751.
- self.log("Waiting for child threads to terminate...")
- for t in threading.enumerate():
- if t != threading.currentThread() and t.isAlive():
- # Note that any dummy (external) threads are always daemonic.
- if hasattr(threading.Thread, "daemon"):
- # Python 2.6+
- d = t.daemon
- else:
- d = t.isDaemon()
- if not d:
- self.log("Waiting for thread %s." % t.getName())
- t.join()
-
- if self.execv:
- self._do_execv()
-
- def wait(self, state, interval=0.1, channel=None):
- """Poll for the given state(s) at intervals; publish to channel."""
- if isinstance(state, (tuple, list)):
- states = state
- else:
- states = [state]
-
- def _wait():
- while self.state not in states:
- time.sleep(interval)
- self.publish(channel)
-
- # From http://psyco.sourceforge.net/psycoguide/bugs.html:
- # "The compiled machine code does not include the regular polling
- # done by Python, meaning that a KeyboardInterrupt will not be
- # detected before execution comes back to the regular Python
- # interpreter. Your program cannot be interrupted if caught
- # into an infinite Psyco-compiled loop."
- try:
- sys.modules['psyco'].cannotcompile(_wait)
- except (KeyError, AttributeError):
- pass
-
- _wait()
-
- def _do_execv(self):
- """Re-execute the current process.
-
- This must be called from the main thread, because certain platforms
- (OS X) don't allow execv to be called in a child thread very well.
- """
- args = sys.argv[:]
- self.log('Re-spawning %s' % ' '.join(args))
-
- if sys.platform[:4] == 'java':
- from _systemrestart import SystemRestart
- raise SystemRestart
- else:
- args.insert(0, sys.executable)
- if sys.platform == 'win32':
- args = ['"%s"' % arg for arg in args]
-
- os.chdir(_startup_cwd)
- if self.max_cloexec_files:
- self._set_cloexec()
- os.execv(sys.executable, args)
-
- def _set_cloexec(self):
- """Set the CLOEXEC flag on all open files (except stdin/out/err).
-
- If self.max_cloexec_files is an integer (the default), then on
- platforms which support it, it represents the max open files setting
- for the operating system. This function will be called just before
- the process is restarted via os.execv() to prevent open files
- from persisting into the new process.
-
- Set self.max_cloexec_files to 0 to disable this behavior.
- """
- for fd in range(3, self.max_cloexec_files): # skip stdin/out/err
- try:
- flags = fcntl.fcntl(fd, fcntl.F_GETFD)
- except IOError:
- continue
- fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
-
- def stop(self):
- """Stop all services."""
- self.state = states.STOPPING
- self.log('Bus STOPPING')
- self.publish('stop')
- self.state = states.STOPPED
- self.log('Bus STOPPED')
-
- def start_with_callback(self, func, args=None, kwargs=None):
- """Start 'func' in a new thread T, then start self (and return T)."""
- if args is None:
- args = ()
- if kwargs is None:
- kwargs = {}
- args = (func,) + args
-
- def _callback(func, *a, **kw):
- self.wait(states.STARTED)
- func(*a, **kw)
- t = threading.Thread(target=_callback, args=args, kwargs=kwargs)
- t.setName('Bus Callback ' + t.getName())
- t.start()
-
- self.start()
-
- return t
-
- def log(self, msg="", level=20, traceback=False):
- """Log the given message. Append the last traceback if requested."""
- if traceback:
- msg += "\n" + "".join(_traceback.format_exception(*sys.exc_info()))
- self.publish('log', msg, level)
-
-bus = Bus()
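-
-# An illustrative lifecycle using the module-level bus (listener hypothetical):
-#
-#     def on_start():
-#         print('started')
-#
-#     bus.subscribe('start', on_start)
-#     bus.subscribe('log', lambda msg, level: None)  # swallow demo log output
-#     bus.start()   # publishes 'start'; runs on_start
-#     bus.exit()    # publishes 'stop', then 'exit'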
diff --git a/python-packages/cherrypy/scaffold/__init__.py b/python-packages/cherrypy/scaffold/__init__.py
deleted file mode 100644
index 00964ac5f6..0000000000
--- a/python-packages/cherrypy/scaffold/__init__.py
+++ /dev/null
@@ -1,61 +0,0 @@
-""", a CherryPy application.
-
-Use this as a base for creating new CherryPy applications. When you want
-to make a new app, copy and paste this folder to some other location
-(maybe site-packages) and rename it to the name of your project,
-then tweak as desired.
-
-Even before any tweaking, this should serve a few demonstration pages.
-Change to this directory and run:
-
- ../cherryd -c site.conf
-
-"""
-
-import cherrypy
-from cherrypy import tools, url
-
-import os
-local_dir = os.path.join(os.getcwd(), os.path.dirname(__file__))
-
-
-class Root:
-
- _cp_config = {'tools.log_tracebacks.on': True,
- }
-
- def index(self):
- return """
-Try some other path,
-or a default path.
-Or, just look at the pretty picture:
-
-""" % (url("other"), url("else"),
- url("files/made_with_cherrypy_small.png"))
- index.exposed = True
-
- def default(self, *args, **kwargs):
- return "args: %s kwargs: %s" % (args, kwargs)
- default.exposed = True
-
- def other(self, a=2, b='bananas', c=None):
- cherrypy.response.headers['Content-Type'] = 'text/plain'
- if c is None:
- return "Have %d %s." % (int(a), b)
- else:
- return "Have %d %s, %s." % (int(a), b, c)
- other.exposed = True
-
- files = cherrypy.tools.staticdir.handler(
- section="/files",
- dir=os.path.join(local_dir, "static"),
- # Ignore .php files, etc.
- match=r'\.(css|gif|html?|ico|jpe?g|js|png|swf|xml)$',
- )
-
-
-root = Root()
-
-# Uncomment the following to use your own favicon instead of CP's default.
-#favicon_path = os.path.join(local_dir, "favicon.ico")
-#root.favicon_ico = tools.staticfile.handler(filename=favicon_path)
diff --git a/python-packages/cherrypy/scaffold/apache-fcgi.conf b/python-packages/cherrypy/scaffold/apache-fcgi.conf
deleted file mode 100644
index 922398eaf8..0000000000
--- a/python-packages/cherrypy/scaffold/apache-fcgi.conf
+++ /dev/null
@@ -1,22 +0,0 @@
-# Apache2 server conf file for using CherryPy with mod_fcgid.
-
-# This doesn't have to be "C:/", but it has to be a directory somewhere, and
-# MUST match the directory used in the FastCgiExternalServer directive, below.
-DocumentRoot "C:/"
-
-ServerName 127.0.0.1
-Listen 80
-LoadModule fastcgi_module modules/mod_fastcgi.dll
-LoadModule rewrite_module modules/mod_rewrite.so
-
-Options ExecCGI
-SetHandler fastcgi-script
-RewriteEngine On
-# Send requests for any URI to our fastcgi handler.
-RewriteRule ^(.*)$ /fastcgi.pyc [L]
-
-# The FastCgiExternalServer directive defines filename as an external FastCGI application.
-# If filename does not begin with a slash (/) then it is assumed to be relative to the ServerRoot.
-# The filename does not have to exist in the local filesystem. URIs that Apache resolves to this
-# filename will be handled by this external FastCGI application.
-FastCgiExternalServer "C:/fastcgi.pyc" -host 127.0.0.1:8088
\ No newline at end of file
diff --git a/python-packages/cherrypy/scaffold/example.conf b/python-packages/cherrypy/scaffold/example.conf
deleted file mode 100644
index 93a6e53c05..0000000000
--- a/python-packages/cherrypy/scaffold/example.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-[/]
-log.error_file: "error.log"
-log.access_file: "access.log"
\ No newline at end of file
diff --git a/python-packages/cherrypy/scaffold/site.conf b/python-packages/cherrypy/scaffold/site.conf
deleted file mode 100644
index 6ed3898373..0000000000
--- a/python-packages/cherrypy/scaffold/site.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-[global]
-# Uncomment this when you're done developing
-#environment: "production"
-
-server.socket_host: "0.0.0.0"
-server.socket_port: 8088
-
-# Uncomment the following lines to run on HTTPS at the same time
-#server.2.socket_host: "0.0.0.0"
-#server.2.socket_port: 8433
-#server.2.ssl_certificate: '../test/test.pem'
-#server.2.ssl_private_key: '../test/test.pem'
-
-tree.myapp: cherrypy.Application(scaffold.root, "/", "example.conf")
diff --git a/python-packages/cherrypy/scaffold/static/made_with_cherrypy_small.png b/python-packages/cherrypy/scaffold/static/made_with_cherrypy_small.png
deleted file mode 100644
index c3aafeed95..0000000000
Binary files a/python-packages/cherrypy/scaffold/static/made_with_cherrypy_small.png and /dev/null differ
diff --git a/python-packages/cherrypy/wsgiserver/__init__.py b/python-packages/cherrypy/wsgiserver/__init__.py
deleted file mode 100644
index ee6190fee1..0000000000
--- a/python-packages/cherrypy/wsgiserver/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer',
- 'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
- 'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert',
- 'WorkerThread', 'ThreadPool', 'SSLAdapter',
- 'CherryPyWSGIServer',
- 'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0',
- 'WSGIPathInfoDispatcher', 'get_ssl_adapter_class']
-
-import sys
-if sys.version_info < (3, 0):
- from wsgiserver2 import *
-else:
- # Le sigh. Boo for backward-incompatible syntax.
- exec('from .wsgiserver3 import *')
diff --git a/python-packages/cherrypy/wsgiserver/ssl_builtin.py b/python-packages/cherrypy/wsgiserver/ssl_builtin.py
deleted file mode 100644
index 03bf05deed..0000000000
--- a/python-packages/cherrypy/wsgiserver/ssl_builtin.py
+++ /dev/null
@@ -1,91 +0,0 @@
-"""A library for integrating Python's builtin ``ssl`` library with CherryPy.
-
-The ssl module must be importable for SSL functionality.
-
-To use this module, set ``CherryPyWSGIServer.ssl_adapter`` to an instance of
-``BuiltinSSLAdapter``.
-"""
-
-try:
- import ssl
-except ImportError:
- ssl = None
-
-try:
- from _pyio import DEFAULT_BUFFER_SIZE
-except ImportError:
- try:
- from io import DEFAULT_BUFFER_SIZE
- except ImportError:
- DEFAULT_BUFFER_SIZE = -1
-
-import sys
-
-from cherrypy import wsgiserver
-
-
-class BuiltinSSLAdapter(wsgiserver.SSLAdapter):
- """A wrapper for integrating Python's builtin ssl module with CherryPy."""
-
- certificate = None
- """The filename of the server SSL certificate."""
-
- private_key = None
- """The filename of the server's private key file."""
-
- def __init__(self, certificate, private_key, certificate_chain=None):
- if ssl is None:
- raise ImportError("You must install the ssl module to use HTTPS.")
- self.certificate = certificate
- self.private_key = private_key
- self.certificate_chain = certificate_chain
-
- def bind(self, sock):
- """Wrap and return the given socket."""
- return sock
-
- def wrap(self, sock):
- """Wrap and return the given socket, plus WSGI environ entries."""
- try:
- s = ssl.wrap_socket(sock, do_handshake_on_connect=True,
- server_side=True, certfile=self.certificate,
- keyfile=self.private_key, ssl_version=ssl.PROTOCOL_SSLv23)
- except ssl.SSLError:
- e = sys.exc_info()[1]
- if e.errno == ssl.SSL_ERROR_EOF:
- # This is almost certainly due to the cherrypy engine
- # 'pinging' the socket to assert it's connectable;
- # the 'ping' isn't SSL.
- return None, {}
- elif e.errno == ssl.SSL_ERROR_SSL:
- if e.args[1].endswith('http request'):
- # The client is speaking HTTP to an HTTPS server.
- raise wsgiserver.NoSSLError
- elif e.args[1].endswith('unknown protocol'):
- # The client is speaking some non-HTTP protocol.
- # Drop the conn.
- return None, {}
- raise
- return s, self.get_environ(s)
-
- # TODO: fill this out more with mod ssl env
- def get_environ(self, sock):
- """Create WSGI environ entries to be merged into each request."""
- cipher = sock.cipher()
- ssl_environ = {
- "wsgi.url_scheme": "https",
- "HTTPS": "on",
- 'SSL_PROTOCOL': cipher[1],
- 'SSL_CIPHER': cipher[0]
-## SSL_VERSION_INTERFACE string The mod_ssl program version
-## SSL_VERSION_LIBRARY string The OpenSSL program version
- }
- return ssl_environ
-
- if sys.version_info >= (3, 0):
- def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
- return wsgiserver.CP_makefile(sock, mode, bufsize)
- else:
- def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
- return wsgiserver.CP_fileobject(sock, mode, bufsize)
-
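-# Illustrative wiring, per the module docstring (the cert/key paths and
-# wsgi_app are hypothetical):
-#
-#     from cherrypy import wsgiserver
-#     server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 8443), wsgi_app)
-#     server.ssl_adapter = BuiltinSSLAdapter('server.crt', 'server.key')
-#     server.start()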
diff --git a/python-packages/cherrypy/wsgiserver/ssl_pyopenssl.py b/python-packages/cherrypy/wsgiserver/ssl_pyopenssl.py
deleted file mode 100644
index f3d9bf54b8..0000000000
--- a/python-packages/cherrypy/wsgiserver/ssl_pyopenssl.py
+++ /dev/null
@@ -1,256 +0,0 @@
-"""A library for integrating pyOpenSSL with CherryPy.
-
-The OpenSSL module must be importable for SSL functionality.
-You can obtain it from http://pyopenssl.sourceforge.net/
-
-To use this module, set CherryPyWSGIServer.ssl_adapter to an instance of
-SSLAdapter. There are two ways to use SSL:
-
-Method One
-----------
-
- * ``ssl_adapter.context``: an instance of SSL.Context.
-
-If this is not None, it is assumed to be an SSL.Context instance,
-and will be passed to SSL.Connection on bind(). The developer is
-responsible for forming a valid Context object. This approach is
-to be preferred for more flexibility, e.g. if the cert and key are
-streams instead of files, or need decryption, or SSL.SSLv3_METHOD
-is desired instead of the default SSL.SSLv23_METHOD, etc. Consult
-the pyOpenSSL documentation for complete options.
-
-Method Two (shortcut)
----------------------
-
- * ``ssl_adapter.certificate``: the filename of the server SSL certificate.
- * ``ssl_adapter.private_key``: the filename of the server's private key file.
-
-Both are None by default. If ssl_adapter.context is None, but .private_key
-and .certificate are both given and valid, they will be read, and the
-context will be automatically created from them.
-"""
-
-import socket
-import threading
-import time
-
-from cherrypy import wsgiserver
-
-try:
- from OpenSSL import SSL
- from OpenSSL import crypto
-except ImportError:
- SSL = None
-
-
-class SSL_fileobject(wsgiserver.CP_fileobject):
- """SSL file object attached to a socket object."""
-
- ssl_timeout = 3
- ssl_retry = .01
-
- def _safe_call(self, is_reader, call, *args, **kwargs):
- """Wrap the given call with SSL error-trapping.
-
- is_reader: if False EOF errors will be raised. If True, EOF errors
- will return "" (to emulate normal sockets).
- """
- start = time.time()
- while True:
- try:
- return call(*args, **kwargs)
- except SSL.WantReadError:
- # Sleep and try again. This is dangerous, because it means
- # the rest of the stack has no way of differentiating
- # between a "new handshake" error and "client dropped".
- # Note this isn't an endless loop: there's a timeout below.
- time.sleep(self.ssl_retry)
- except SSL.WantWriteError:
- time.sleep(self.ssl_retry)
- except SSL.SysCallError, e:
- if is_reader and e.args == (-1, 'Unexpected EOF'):
- return ""
-
- errnum = e.args[0]
- if is_reader and errnum in wsgiserver.socket_errors_to_ignore:
- return ""
- raise socket.error(errnum)
- except SSL.Error, e:
- if is_reader and e.args == (-1, 'Unexpected EOF'):
- return ""
-
- thirdarg = None
- try:
- thirdarg = e.args[0][0][2]
- except IndexError:
- pass
-
- if thirdarg == 'http request':
- # The client is talking HTTP to an HTTPS server.
- raise wsgiserver.NoSSLError()
-
- raise wsgiserver.FatalSSLAlert(*e.args)
- except:
- raise
-
- if time.time() - start > self.ssl_timeout:
- raise socket.timeout("timed out")
-
- def recv(self, *args, **kwargs):
- buf = []
- r = super(SSL_fileobject, self).recv
- while True:
- data = self._safe_call(True, r, *args, **kwargs)
- buf.append(data)
- p = self._sock.pending()
- if not p:
- return "".join(buf)
-
- def sendall(self, *args, **kwargs):
- return self._safe_call(False, super(SSL_fileobject, self).sendall,
- *args, **kwargs)
-
- def send(self, *args, **kwargs):
- return self._safe_call(False, super(SSL_fileobject, self).send,
- *args, **kwargs)
-
-
-class SSLConnection:
- """A thread-safe wrapper for an SSL.Connection.
-
- ``*args``: the arguments to create the wrapped ``SSL.Connection(*args)``.
- """
-
- def __init__(self, *args):
- self._ssl_conn = SSL.Connection(*args)
- self._lock = threading.RLock()
-
- for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read',
- 'renegotiate', 'bind', 'listen', 'connect', 'accept',
- 'setblocking', 'fileno', 'close', 'get_cipher_list',
- 'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
- 'makefile', 'get_app_data', 'set_app_data', 'state_string',
- 'sock_shutdown', 'get_peer_certificate', 'want_read',
- 'want_write', 'set_connect_state', 'set_accept_state',
- 'connect_ex', 'sendall', 'settimeout', 'gettimeout'):
- exec("""def %s(self, *args):
- self._lock.acquire()
- try:
- return self._ssl_conn.%s(*args)
- finally:
- self._lock.release()
-""" % (f, f))
-
- def shutdown(self, *args):
- self._lock.acquire()
- try:
- # pyOpenSSL.socket.shutdown takes no args
- return self._ssl_conn.shutdown()
- finally:
- self._lock.release()
-
-
-class pyOpenSSLAdapter(wsgiserver.SSLAdapter):
- """A wrapper for integrating pyOpenSSL with CherryPy."""
-
- context = None
- """An instance of SSL.Context."""
-
- certificate = None
- """The filename of the server SSL certificate."""
-
- private_key = None
- """The filename of the server's private key file."""
-
- certificate_chain = None
- """Optional. The filename of CA's intermediate certificate bundle.
-
- This is needed for cheaper "chained root" SSL certificates, and should be
- left as None if not required."""
-
- def __init__(self, certificate, private_key, certificate_chain=None):
- if SSL is None:
- raise ImportError("You must install pyOpenSSL to use HTTPS.")
-
- self.context = None
- self.certificate = certificate
- self.private_key = private_key
- self.certificate_chain = certificate_chain
- self._environ = None
-
- def bind(self, sock):
- """Wrap and return the given socket."""
- if self.context is None:
- self.context = self.get_context()
- conn = SSLConnection(self.context, sock)
- self._environ = self.get_environ()
- return conn
-
- def wrap(self, sock):
- """Wrap and return the given socket, plus WSGI environ entries."""
- return sock, self._environ.copy()
-
- def get_context(self):
- """Return an SSL.Context from self attributes."""
- # See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
- c = SSL.Context(SSL.SSLv23_METHOD)
- c.use_privatekey_file(self.private_key)
- if self.certificate_chain:
- c.load_verify_locations(self.certificate_chain)
- c.use_certificate_file(self.certificate)
- return c
-
- def get_environ(self):
- """Return WSGI environ entries to be merged into each request."""
- ssl_environ = {
- "HTTPS": "on",
- # pyOpenSSL doesn't provide access to any of these AFAICT
-## 'SSL_PROTOCOL': 'SSLv2',
-## SSL_CIPHER string The cipher specification name
-## SSL_VERSION_INTERFACE string The mod_ssl program version
-## SSL_VERSION_LIBRARY string The OpenSSL program version
- }
-
- if self.certificate:
- # Server certificate attributes
- cert = open(self.certificate, 'rb').read()
- cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
- ssl_environ.update({
- 'SSL_SERVER_M_VERSION': cert.get_version(),
- 'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
-## 'SSL_SERVER_V_START': Validity of server's certificate (start time),
-## 'SSL_SERVER_V_END': Validity of server's certificate (end time),
- })
-
- for prefix, dn in [("I", cert.get_issuer()),
- ("S", cert.get_subject())]:
- # X509Name objects don't seem to have a way to get the
- # complete DN string. Use str() and slice it instead,
-                # because str(dn) == "<X509Name object '/C=US/ST=...'>"
- dnstr = str(dn)[18:-2]
-
- wsgikey = 'SSL_SERVER_%s_DN' % prefix
- ssl_environ[wsgikey] = dnstr
-
- # The DN should be of the form: /k1=v1/k2=v2, but we must allow
- # for any value to contain slashes itself (in a URL).
- while dnstr:
- pos = dnstr.rfind("=")
- dnstr, value = dnstr[:pos], dnstr[pos + 1:]
- pos = dnstr.rfind("/")
- dnstr, key = dnstr[:pos], dnstr[pos + 1:]
- if key and value:
- wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
- ssl_environ[wsgikey] = value
-
- return ssl_environ
-
- def makefile(self, sock, mode='r', bufsize=-1):
- if SSL and isinstance(sock, SSL.ConnectionType):
- timeout = sock.gettimeout()
- f = SSL_fileobject(sock, mode, bufsize)
- f.ssl_timeout = timeout
- return f
- else:
- return wsgiserver.CP_fileobject(sock, mode, bufsize)
-
diff --git a/python-packages/cherrypy/wsgiserver/wsgiserver2.py b/python-packages/cherrypy/wsgiserver/wsgiserver2.py
deleted file mode 100644
index b6bd499718..0000000000
--- a/python-packages/cherrypy/wsgiserver/wsgiserver2.py
+++ /dev/null
@@ -1,2322 +0,0 @@
-"""A high-speed, production ready, thread pooled, generic HTTP server.
-
-Simplest example on how to use this module directly
-(without using CherryPy's application machinery)::
-
- from cherrypy import wsgiserver
-
- def my_crazy_app(environ, start_response):
- status = '200 OK'
- response_headers = [('Content-type','text/plain')]
- start_response(status, response_headers)
- return ['Hello world!']
-
- server = wsgiserver.CherryPyWSGIServer(
- ('0.0.0.0', 8070), my_crazy_app,
- server_name='www.cherrypy.example')
- server.start()
-
-The CherryPy WSGI server can serve as many WSGI applications
-as you want in one instance by using a WSGIPathInfoDispatcher::
-
- d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
- server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
-
-Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
-
-This won't call the CherryPy engine (application side) at all, only the
-HTTP server, which is independent from the rest of CherryPy. Don't
-let the name "CherryPyWSGIServer" throw you; the name merely reflects
-its origin, not its coupling.
-
-For those of you wanting to understand internals of this module, here's the
-basic call flow. The server's listening thread runs a very tight loop,
-sticking incoming connections onto a Queue::
-
- server = CherryPyWSGIServer(...)
- server.start()
- while True:
- tick()
- # This blocks until a request comes in:
- child = socket.accept()
- conn = HTTPConnection(child, ...)
- server.requests.put(conn)
-
-Worker threads are kept in a pool and poll the Queue, popping off and then
-handling each connection in turn. Each connection can consist of an arbitrary
-number of requests and their responses, so we run a nested loop::
-
- while True:
- conn = server.requests.get()
- conn.communicate()
- -> while True:
- req = HTTPRequest(...)
- req.parse_request()
- -> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
- req.rfile.readline()
- read_headers(req.rfile, req.inheaders)
- req.respond()
- -> response = app(...)
- try:
- for chunk in response:
- if chunk:
- req.write(chunk)
- finally:
- if hasattr(response, "close"):
- response.close()
- if req.close_connection:
- return
-"""
-
-__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer',
- 'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
- 'CP_fileobject',
- 'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert',
- 'WorkerThread', 'ThreadPool', 'SSLAdapter',
- 'CherryPyWSGIServer',
- 'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0',
- 'WSGIPathInfoDispatcher', 'get_ssl_adapter_class']
-
-import os
-try:
- import queue
-except:
- import Queue as queue
-import re
-import rfc822
-import socket
-import sys
-if 'win' in sys.platform and not hasattr(socket, 'IPPROTO_IPV6'):
- socket.IPPROTO_IPV6 = 41
-try:
- import cStringIO as StringIO
-except ImportError:
- import StringIO
-DEFAULT_BUFFER_SIZE = -1
-
-_fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestring)
-
-import threading
-import time
-import traceback
-def format_exc(limit=None):
- """Like print_exc() but return a string. Backport for Python 2.3."""
- try:
- etype, value, tb = sys.exc_info()
- return ''.join(traceback.format_exception(etype, value, tb, limit))
- finally:
- etype = value = tb = None
-
-
-from urllib import unquote
-from urlparse import urlparse
-import warnings
-
-if sys.version_info >= (3, 0):
- bytestr = bytes
- unicodestr = str
- basestring = (bytes, str)
- def ntob(n, encoding='ISO-8859-1'):
- """Return the given native string as a byte string in the given encoding."""
- # In Python 3, the native string type is unicode
- return n.encode(encoding)
-else:
- bytestr = str
- unicodestr = unicode
- basestring = basestring
- def ntob(n, encoding='ISO-8859-1'):
- """Return the given native string as a byte string in the given encoding."""
- # In Python 2, the native string type is bytes. Assume it's already
- # in the given encoding, which for ISO-8859-1 is almost always what
- # was intended.
- return n
-
-LF = ntob('\n')
-CRLF = ntob('\r\n')
-TAB = ntob('\t')
-SPACE = ntob(' ')
-COLON = ntob(':')
-SEMICOLON = ntob(';')
-EMPTY = ntob('')
-NUMBER_SIGN = ntob('#')
-QUESTION_MARK = ntob('?')
-ASTERISK = ntob('*')
-FORWARD_SLASH = ntob('/')
-quoted_slash = re.compile(ntob("(?i)%2F"))
-
-import errno
-
-def plat_specific_errors(*errnames):
- """Return error numbers for all errors in errnames on this platform.
-
- The 'errno' module contains different global constants depending on
- the specific platform (OS). This function will return the list of
- numeric values for a given list of potential names.
- """
- errno_names = dir(errno)
- nums = [getattr(errno, k) for k in errnames if k in errno_names]
- # de-dupe the list
- return list(dict.fromkeys(nums).keys())
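-
-# Illustrative (numeric values vary by platform):
-#
-#     plat_specific_errors('EPIPE', 'WSAEINTR')
-#     # -> [32] on Linux; WSAEINTR exists only on Windows builds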
-
-socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
-
-socket_errors_to_ignore = plat_specific_errors(
- "EPIPE",
- "EBADF", "WSAEBADF",
- "ENOTSOCK", "WSAENOTSOCK",
- "ETIMEDOUT", "WSAETIMEDOUT",
- "ECONNREFUSED", "WSAECONNREFUSED",
- "ECONNRESET", "WSAECONNRESET",
- "ECONNABORTED", "WSAECONNABORTED",
- "ENETRESET", "WSAENETRESET",
- "EHOSTDOWN", "EHOSTUNREACH",
- )
-socket_errors_to_ignore.append("timed out")
-socket_errors_to_ignore.append("The read operation timed out")
-
-socket_errors_nonblocking = plat_specific_errors(
- 'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
-
-comma_separated_headers = [ntob(h) for h in
- ['Accept', 'Accept-Charset', 'Accept-Encoding',
- 'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
- 'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
- 'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
- 'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
- 'WWW-Authenticate']]
-
-
-import logging
-if not hasattr(logging, 'statistics'): logging.statistics = {}
-
-
-def read_headers(rfile, hdict=None):
- """Read headers from the given stream into the given header dict.
-
- If hdict is None, a new header dict is created. Returns the populated
- header dict.
-
- Headers which are repeated are folded together using a comma if their
- specification so dictates.
-
- This function raises ValueError when the read bytes violate the HTTP spec.
- You should probably return "400 Bad Request" if this happens.
- """
- if hdict is None:
- hdict = {}
-
- while True:
- line = rfile.readline()
- if not line:
- # No more data--illegal end of headers
- raise ValueError("Illegal end of headers.")
-
- if line == CRLF:
- # Normal end of headers
- break
- if not line.endswith(CRLF):
- raise ValueError("HTTP requires CRLF terminators")
-
- if line[0] in (SPACE, TAB):
- # It's a continuation line.
- v = line.strip()
- else:
- try:
- k, v = line.split(COLON, 1)
- except ValueError:
- raise ValueError("Illegal header line.")
- # TODO: what about TE and WWW-Authenticate?
- k = k.strip().title()
- v = v.strip()
- hname = k
-
- if k in comma_separated_headers:
- existing = hdict.get(hname)
- if existing:
- v = ", ".join((existing, v))
- hdict[hname] = v
-
- return hdict
-
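-# Illustrative example (not part of the original module): headers named in
-# comma_separated_headers are folded into a single value when repeated:
-#
-#   >>> from StringIO import StringIO
-#   >>> rfile = StringIO("Accept: text/html\r\nAccept: text/plain\r\n\r\n")
-#   >>> read_headers(rfile)["Accept"]
-#   'text/html, text/plain'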
-
-class MaxSizeExceeded(Exception):
- pass
-
-class SizeCheckWrapper(object):
- """Wraps a file-like object, raising MaxSizeExceeded if too large."""
-
- def __init__(self, rfile, maxlen):
- self.rfile = rfile
- self.maxlen = maxlen
- self.bytes_read = 0
-
- def _check_length(self):
- if self.maxlen and self.bytes_read > self.maxlen:
- raise MaxSizeExceeded()
-
- def read(self, size=None):
- data = self.rfile.read(size)
- self.bytes_read += len(data)
- self._check_length()
- return data
-
- def readline(self, size=None):
- if size is not None:
- data = self.rfile.readline(size)
- self.bytes_read += len(data)
- self._check_length()
- return data
-
- # User didn't specify a size ...
- # We read the line in chunks to make sure it's not a 100MB line !
- res = []
- while True:
- data = self.rfile.readline(256)
- self.bytes_read += len(data)
- self._check_length()
- res.append(data)
- # See http://www.cherrypy.org/ticket/421
-            if len(data) < 256 or data[-1:] == LF:
- return EMPTY.join(res)
-
- def readlines(self, sizehint=0):
- # Shamelessly stolen from StringIO
- total = 0
- lines = []
- line = self.readline()
- while line:
- lines.append(line)
- total += len(line)
- if 0 < sizehint <= total:
- break
- line = self.readline()
- return lines
-
- def close(self):
- self.rfile.close()
-
- def __iter__(self):
- return self
-
- def __next__(self):
- data = next(self.rfile)
- self.bytes_read += len(data)
- self._check_length()
- return data
-
- def next(self):
- data = self.rfile.next()
- self.bytes_read += len(data)
- self._check_length()
- return data
-
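-# Illustrative example (not part of the original module): once more than
-# maxlen bytes have been read, MaxSizeExceeded is raised; callers turn
-# this into a 413 or 414 response:
-#
-#   >>> from StringIO import StringIO
-#   >>> w = SizeCheckWrapper(StringIO("x" * 100), 10)
-#   >>> w.read(100)
-#   Traceback (most recent call last):
-#       ...
-#   MaxSizeExceeded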
-
-class KnownLengthRFile(object):
- """Wraps a file-like object, returning an empty string when exhausted."""
-
- def __init__(self, rfile, content_length):
- self.rfile = rfile
- self.remaining = content_length
-
- def read(self, size=None):
- if self.remaining == 0:
- return ''
- if size is None:
- size = self.remaining
- else:
- size = min(size, self.remaining)
-
- data = self.rfile.read(size)
- self.remaining -= len(data)
- return data
-
- def readline(self, size=None):
- if self.remaining == 0:
- return ''
- if size is None:
- size = self.remaining
- else:
- size = min(size, self.remaining)
-
- data = self.rfile.readline(size)
- self.remaining -= len(data)
- return data
-
- def readlines(self, sizehint=0):
- # Shamelessly stolen from StringIO
- total = 0
- lines = []
- line = self.readline(sizehint)
- while line:
- lines.append(line)
- total += len(line)
- if 0 < sizehint <= total:
- break
- line = self.readline(sizehint)
- return lines
-
- def close(self):
- self.rfile.close()
-
- def __iter__(self):
- return self
-
- def __next__(self):
- data = next(self.rfile)
- self.remaining -= len(data)
- return data
-
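-# Illustrative example (not part of the original module): reads are capped
-# by the declared Content-Length, and '' signals exhaustion:
-#
-#   >>> from StringIO import StringIO
-#   >>> r = KnownLengthRFile(StringIO("hello, world"), 5)
-#   >>> r.read(), r.read()
-#   ('hello', '')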
-
-class ChunkedRFile(object):
- """Wraps a file-like object, returning an empty string when exhausted.
-
- This class is intended to provide a conforming wsgi.input value for
- request entities that have been encoded with the 'chunked' transfer
- encoding.
- """
-
- def __init__(self, rfile, maxlen, bufsize=8192):
- self.rfile = rfile
- self.maxlen = maxlen
- self.bytes_read = 0
- self.buffer = EMPTY
- self.bufsize = bufsize
- self.closed = False
-
- def _fetch(self):
- if self.closed:
- return
-
- line = self.rfile.readline()
- self.bytes_read += len(line)
-
- if self.maxlen and self.bytes_read > self.maxlen:
- raise MaxSizeExceeded("Request Entity Too Large", self.maxlen)
-
- line = line.strip().split(SEMICOLON, 1)
-
- try:
- chunk_size = line.pop(0)
- chunk_size = int(chunk_size, 16)
- except ValueError:
- raise ValueError("Bad chunked transfer size: " + repr(chunk_size))
-
- if chunk_size <= 0:
- self.closed = True
- return
-
-## if line: chunk_extension = line[0]
-
- if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
- raise IOError("Request Entity Too Large")
-
- chunk = self.rfile.read(chunk_size)
- self.bytes_read += len(chunk)
- self.buffer += chunk
-
- crlf = self.rfile.read(2)
- if crlf != CRLF:
- raise ValueError(
- "Bad chunked transfer coding (expected '\\r\\n', "
- "got " + repr(crlf) + ")")
-
- def read(self, size=None):
- data = EMPTY
- while True:
- if size and len(data) >= size:
- return data
-
- if not self.buffer:
- self._fetch()
- if not self.buffer:
- # EOF
- return data
-
- if size:
- remaining = size - len(data)
- data += self.buffer[:remaining]
- self.buffer = self.buffer[remaining:]
-            else:
-                data += self.buffer
-                self.buffer = EMPTY
-
-    def readline(self, size=None):
-        data = EMPTY
-        while True:
-            if size and len(data) >= size:
-                return data
-
-            if not self.buffer:
-                self._fetch()
-                if not self.buffer:
-                    # EOF
-                    return data
-
-            newline_pos = self.buffer.find(LF)
-            if newline_pos == -1:
-                # No newline buffered yet; consume what we have and loop.
-                if size:
-                    remaining = size - len(data)
-                    data += self.buffer[:remaining]
-                    self.buffer = self.buffer[remaining:]
-                else:
-                    data += self.buffer
-                    self.buffer = EMPTY
-            else:
-                # Consume up to (and including) the LF, then return the line.
-                end = newline_pos + 1
-                if size:
-                    end = min(size - len(data), end)
-                data += self.buffer[:end]
-                self.buffer = self.buffer[end:]
-                return data
-
- def readlines(self, sizehint=0):
- # Shamelessly stolen from StringIO
- total = 0
- lines = []
- line = self.readline(sizehint)
- while line:
- lines.append(line)
- total += len(line)
- if 0 < sizehint <= total:
- break
- line = self.readline(sizehint)
- return lines
-
- def read_trailer_lines(self):
- if not self.closed:
- raise ValueError(
- "Cannot read trailers until the request body has been read.")
-
- while True:
- line = self.rfile.readline()
- if not line:
- # No more data--illegal end of headers
- raise ValueError("Illegal end of headers.")
-
- self.bytes_read += len(line)
- if self.maxlen and self.bytes_read > self.maxlen:
- raise IOError("Request Entity Too Large")
-
- if line == CRLF:
- # Normal end of headers
- break
- if not line.endswith(CRLF):
- raise ValueError("HTTP requires CRLF terminators")
-
- yield line
-
- def close(self):
- self.rfile.close()
-
-    def __iter__(self):
-        # Yield one line at a time until the body is exhausted.
-        line = self.readline()
-        while line:
-            yield line
-            line = self.readline()
-
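-# Illustrative example (not part of the original module): decoding a body
-# sent with "Transfer-Encoding: chunked", where each chunk is framed as
-# "<hex size>\r\n<data>\r\n" and a zero-size chunk ends the body:
-#
-#   >>> from StringIO import StringIO
-#   >>> body = "5\r\nhello\r\n7\r\n, world\r\n0\r\n\r\n"
-#   >>> ChunkedRFile(StringIO(body), 0).read()
-#   'hello, world'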
-
-class HTTPRequest(object):
- """An HTTP Request (and response).
-
- A single HTTP connection may consist of multiple request/response pairs.
- """
-
- server = None
- """The HTTPServer object which is receiving this request."""
-
- conn = None
- """The HTTPConnection object on which this request connected."""
-
- inheaders = {}
- """A dict of request headers."""
-
- outheaders = []
- """A list of header tuples to write in the response."""
-
- ready = False
- """When True, the request has been parsed and is ready to begin generating
- the response. When False, signals the calling Connection that the response
- should not be generated and the connection should close."""
-
- close_connection = False
- """Signals the calling Connection that the request should close. This does
- not imply an error! The client and/or server may each request that the
- connection be closed."""
-
- chunked_write = False
- """If True, output will be encoded with the "chunked" transfer-coding.
-
- This value is set automatically inside send_headers."""
-
- def __init__(self, server, conn):
-        self.server = server
- self.conn = conn
-
- self.ready = False
- self.started_request = False
- self.scheme = ntob("http")
- if self.server.ssl_adapter is not None:
- self.scheme = ntob("https")
- # Use the lowest-common protocol in case read_request_line errors.
- self.response_protocol = 'HTTP/1.0'
- self.inheaders = {}
-
- self.status = ""
- self.outheaders = []
- self.sent_headers = False
- self.close_connection = self.__class__.close_connection
- self.chunked_read = False
- self.chunked_write = self.__class__.chunked_write
-
- def parse_request(self):
- """Parse the next HTTP request start-line and message-headers."""
- self.rfile = SizeCheckWrapper(self.conn.rfile,
- self.server.max_request_header_size)
- try:
- success = self.read_request_line()
- except MaxSizeExceeded:
- self.simple_response("414 Request-URI Too Long",
- "The Request-URI sent with the request exceeds the maximum "
- "allowed bytes.")
- return
- else:
- if not success:
- return
-
- try:
- success = self.read_request_headers()
- except MaxSizeExceeded:
- self.simple_response("413 Request Entity Too Large",
- "The headers sent with the request exceed the maximum "
- "allowed bytes.")
- return
- else:
- if not success:
- return
-
- self.ready = True
-
- def read_request_line(self):
- # HTTP/1.1 connections are persistent by default. If a client
- # requests a page, then idles (leaves the connection open),
- # then rfile.readline() will raise socket.error("timed out").
- # Note that it does this based on the value given to settimeout(),
- # and doesn't need the client to request or acknowledge the close
- # (although your TCP stack might suffer for it: cf Apache's history
- # with FIN_WAIT_2).
- request_line = self.rfile.readline()
-
- # Set started_request to True so communicate() knows to send 408
- # from here on out.
- self.started_request = True
- if not request_line:
- return False
-
- if request_line == CRLF:
- # RFC 2616 sec 4.1: "...if the server is reading the protocol
- # stream at the beginning of a message and receives a CRLF
- # first, it should ignore the CRLF."
- # But only ignore one leading line! else we enable a DoS.
- request_line = self.rfile.readline()
- if not request_line:
- return False
-
- if not request_line.endswith(CRLF):
- self.simple_response("400 Bad Request", "HTTP requires CRLF terminators")
- return False
-
- try:
- method, uri, req_protocol = request_line.strip().split(SPACE, 2)
- rp = int(req_protocol[5]), int(req_protocol[7])
- except (ValueError, IndexError):
- self.simple_response("400 Bad Request", "Malformed Request-Line")
- return False
-
- self.uri = uri
- self.method = method
-
-        # uri may be an absoluteURI (e.g. "http://host.domain.tld/path")
-        # or an abs_path:
- scheme, authority, path = self.parse_request_uri(uri)
- if NUMBER_SIGN in path:
- self.simple_response("400 Bad Request",
- "Illegal #fragment in Request-URI.")
- return False
-
- if scheme:
- self.scheme = scheme
-
- qs = EMPTY
- if QUESTION_MARK in path:
- path, qs = path.split(QUESTION_MARK, 1)
-
- # Unquote the path+params (e.g. "/this%20path" -> "/this path").
- # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
- #
- # But note that "...a URI must be separated into its components
- # before the escaped characters within those components can be
- # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
- # Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
- try:
- atoms = [unquote(x) for x in quoted_slash.split(path)]
- except ValueError:
- ex = sys.exc_info()[1]
- self.simple_response("400 Bad Request", ex.args[0])
- return False
- path = "%2F".join(atoms)
- self.path = path
-
- # Note that, like wsgiref and most other HTTP servers,
- # we "% HEX HEX"-unquote the path but not the query string.
- self.qs = qs
-
- # Compare request and server HTTP protocol versions, in case our
- # server does not support the requested protocol. Limit our output
- # to min(req, server). We want the following output:
- # request server actual written supported response
- # protocol protocol response protocol feature set
- # a 1.0 1.0 1.0 1.0
- # b 1.0 1.1 1.1 1.0
- # c 1.1 1.0 1.0 1.0
- # d 1.1 1.1 1.1 1.1
- # Notice that, in (b), the response will be "HTTP/1.1" even though
- # the client only understands 1.0. RFC 2616 10.5.6 says we should
- # only return 505 if the _major_ version is different.
- sp = int(self.server.protocol[5]), int(self.server.protocol[7])
-
- if sp[0] != rp[0]:
- self.simple_response("505 HTTP Version Not Supported")
- return False
-
- self.request_protocol = req_protocol
- self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
-
- return True
-
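-    # Illustrative example (not part of the original module) of the
-    # min(req, server) rule documented above:
-    #
-    #   rp, sp = (1, 0), (1, 1)          # client speaks 1.0, server 1.1
-    #   "HTTP/%s.%s" % min(rp, sp)       # -> 'HTTP/1.0'
-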
- def read_request_headers(self):
- """Read self.rfile into self.inheaders. Return success."""
-
- # then all the http headers
- try:
- read_headers(self.rfile, self.inheaders)
- except ValueError:
- ex = sys.exc_info()[1]
- self.simple_response("400 Bad Request", ex.args[0])
- return False
-
- mrbs = self.server.max_request_body_size
- if mrbs and int(self.inheaders.get("Content-Length", 0)) > mrbs:
- self.simple_response("413 Request Entity Too Large",
- "The entity sent with the request exceeds the maximum "
- "allowed bytes.")
- return False
-
- # Persistent connection support
- if self.response_protocol == "HTTP/1.1":
- # Both server and client are HTTP/1.1
- if self.inheaders.get("Connection", "") == "close":
- self.close_connection = True
- else:
- # Either the server or client (or both) are HTTP/1.0
- if self.inheaders.get("Connection", "") != "Keep-Alive":
- self.close_connection = True
-
- # Transfer-Encoding support
- te = None
- if self.response_protocol == "HTTP/1.1":
- te = self.inheaders.get("Transfer-Encoding")
- if te:
- te = [x.strip().lower() for x in te.split(",") if x.strip()]
-
- self.chunked_read = False
-
- if te:
- for enc in te:
- if enc == "chunked":
- self.chunked_read = True
- else:
- # Note that, even if we see "chunked", we must reject
- # if there is an extension we don't recognize.
- self.simple_response("501 Unimplemented")
- self.close_connection = True
- return False
-
- # From PEP 333:
- # "Servers and gateways that implement HTTP 1.1 must provide
- # transparent support for HTTP 1.1's "expect/continue" mechanism.
- # This may be done in any of several ways:
- # 1. Respond to requests containing an Expect: 100-continue request
- # with an immediate "100 Continue" response, and proceed normally.
- # 2. Proceed with the request normally, but provide the application
- # with a wsgi.input stream that will send the "100 Continue"
- # response if/when the application first attempts to read from
- # the input stream. The read request must then remain blocked
- # until the client responds.
- # 3. Wait until the client decides that the server does not support
- # expect/continue, and sends the request body on its own.
- # (This is suboptimal, and is not recommended.)
- #
- # We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
- # but it seems like it would be a big slowdown for such a rare case.
- if self.inheaders.get("Expect", "") == "100-continue":
- # Don't use simple_response here, because it emits headers
- # we don't want. See http://www.cherrypy.org/ticket/951
- msg = self.server.protocol + " 100 Continue\r\n\r\n"
- try:
- self.conn.wfile.sendall(msg)
- except socket.error:
- x = sys.exc_info()[1]
- if x.args[0] not in socket_errors_to_ignore:
- raise
- return True
-
- def parse_request_uri(self, uri):
- """Parse a Request-URI into (scheme, authority, path).
-
- Note that Request-URI's must be one of::
-
- Request-URI = "*" | absoluteURI | abs_path | authority
-
- Therefore, a Request-URI which starts with a double forward-slash
- cannot be a "net_path"::
-
- net_path = "//" authority [ abs_path ]
-
- Instead, it must be interpreted as an "abs_path" with an empty first
- path segment::
-
- abs_path = "/" path_segments
- path_segments = segment *( "/" segment )
- segment = *pchar *( ";" param )
- param = *pchar
- """
- if uri == ASTERISK:
- return None, None, uri
-
- i = uri.find('://')
- if i > 0 and QUESTION_MARK not in uri[:i]:
- # An absoluteURI.
- # If there's a scheme (and it must be http or https), then:
- # http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]]
- scheme, remainder = uri[:i].lower(), uri[i + 3:]
- authority, path = remainder.split(FORWARD_SLASH, 1)
- path = FORWARD_SLASH + path
- return scheme, authority, path
-
- if uri.startswith(FORWARD_SLASH):
- # An abs_path.
- return None, None, uri
- else:
- # An authority.
- return None, uri, None
-
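-    # Illustrative examples (not part of the original module) of the four
-    # Request-URI forms as parse_request_uri sees them:
-    #
-    #   "*"                      -> (None, None, "*")
-    #   "/p?a=1"                 -> (None, None, "/p?a=1")
-    #   "http://example.com/p"   -> ("http", "example.com", "/p")
-    #   "example.com:8080"       -> (None, "example.com:8080", None)
-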
- def respond(self):
- """Call the gateway and write its iterable output."""
- mrbs = self.server.max_request_body_size
- if self.chunked_read:
- self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
- else:
- cl = int(self.inheaders.get("Content-Length", 0))
- if mrbs and mrbs < cl:
- if not self.sent_headers:
- self.simple_response("413 Request Entity Too Large",
- "The entity sent with the request exceeds the maximum "
- "allowed bytes.")
- return
- self.rfile = KnownLengthRFile(self.conn.rfile, cl)
-
- self.server.gateway(self).respond()
-
- if (self.ready and not self.sent_headers):
- self.sent_headers = True
- self.send_headers()
- if self.chunked_write:
- self.conn.wfile.sendall("0\r\n\r\n")
-
- def simple_response(self, status, msg=""):
- """Write a simple response back to the client."""
- status = str(status)
- buf = [self.server.protocol + SPACE +
- status + CRLF,
- "Content-Length: %s\r\n" % len(msg),
- "Content-Type: text/plain\r\n"]
-
- if status[:3] in ("413", "414"):
- # Request Entity Too Large / Request-URI Too Long
- self.close_connection = True
- if self.response_protocol == 'HTTP/1.1':
- # This will not be true for 414, since read_request_line
- # usually raises 414 before reading the whole line, and we
- # therefore cannot know the proper response_protocol.
- buf.append("Connection: close\r\n")
-            else:
-                # HTTP/1.0 had no 413/414 status nor Connection header.
-                # Emit 400 instead and trust the message body is enough.
-                status = "400 Bad Request"
-                buf[0] = self.server.protocol + SPACE + status + CRLF
-
- buf.append(CRLF)
- if msg:
- if isinstance(msg, unicodestr):
- msg = msg.encode("ISO-8859-1")
- buf.append(msg)
-
- try:
- self.conn.wfile.sendall("".join(buf))
- except socket.error:
- x = sys.exc_info()[1]
- if x.args[0] not in socket_errors_to_ignore:
- raise
-
- def write(self, chunk):
- """Write unbuffered data to the client."""
- if self.chunked_write and chunk:
- buf = [hex(len(chunk))[2:], CRLF, chunk, CRLF]
- self.conn.wfile.sendall(EMPTY.join(buf))
- else:
- self.conn.wfile.sendall(chunk)
-
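-    # Illustrative example (not part of the original module): with
-    # chunked_write enabled, write("hello") sends the framed bytes
-    # "5\r\nhello\r\n" (hex length, CRLF, payload, CRLF).
-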
- def send_headers(self):
- """Assert, process, and send the HTTP response message-headers.
-
- You must set self.status, and self.outheaders before calling this.
- """
- hkeys = [key.lower() for key, value in self.outheaders]
- status = int(self.status[:3])
-
- if status == 413:
- # Request Entity Too Large. Close conn to avoid garbage.
- self.close_connection = True
- elif "content-length" not in hkeys:
- # "All 1xx (informational), 204 (no content),
- # and 304 (not modified) responses MUST NOT
- # include a message-body." So no point chunking.
- if status < 200 or status in (204, 205, 304):
- pass
- else:
- if (self.response_protocol == 'HTTP/1.1'
- and self.method != 'HEAD'):
- # Use the chunked transfer-coding
- self.chunked_write = True
- self.outheaders.append(("Transfer-Encoding", "chunked"))
- else:
- # Closing the conn is the only way to determine len.
- self.close_connection = True
-
- if "connection" not in hkeys:
- if self.response_protocol == 'HTTP/1.1':
- # Both server and client are HTTP/1.1 or better
- if self.close_connection:
- self.outheaders.append(("Connection", "close"))
- else:
- # Server and/or client are HTTP/1.0
- if not self.close_connection:
- self.outheaders.append(("Connection", "Keep-Alive"))
-
- if (not self.close_connection) and (not self.chunked_read):
- # Read any remaining request body data on the socket.
- # "If an origin server receives a request that does not include an
- # Expect request-header field with the "100-continue" expectation,
- # the request includes a request body, and the server responds
- # with a final status code before reading the entire request body
- # from the transport connection, then the server SHOULD NOT close
- # the transport connection until it has read the entire request,
- # or until the client closes the connection. Otherwise, the client
- # might not reliably receive the response message. However, this
-            # requirement is not to be construed as preventing a server from
- # defending itself against denial-of-service attacks, or from
- # badly broken client implementations."
- remaining = getattr(self.rfile, 'remaining', 0)
- if remaining > 0:
- self.rfile.read(remaining)
-
- if "date" not in hkeys:
- self.outheaders.append(("Date", rfc822.formatdate()))
-
- if "server" not in hkeys:
- self.outheaders.append(("Server", self.server.server_name))
-
- buf = [self.server.protocol + SPACE + self.status + CRLF]
- for k, v in self.outheaders:
- buf.append(k + COLON + SPACE + v + CRLF)
- buf.append(CRLF)
- self.conn.wfile.sendall(EMPTY.join(buf))
-
-
-class NoSSLError(Exception):
- """Exception raised when a client speaks HTTP to an HTTPS socket."""
- pass
-
-
-class FatalSSLAlert(Exception):
- """Exception raised when the SSL implementation signals a fatal alert."""
- pass
-
-
-class CP_fileobject(socket._fileobject):
- """Faux file object attached to a socket object."""
-
- def __init__(self, *args, **kwargs):
- self.bytes_read = 0
- self.bytes_written = 0
- socket._fileobject.__init__(self, *args, **kwargs)
-
- def sendall(self, data):
- """Sendall for non-blocking sockets."""
- while data:
- try:
- bytes_sent = self.send(data)
- data = data[bytes_sent:]
- except socket.error, e:
- if e.args[0] not in socket_errors_nonblocking:
- raise
-
- def send(self, data):
- bytes_sent = self._sock.send(data)
- self.bytes_written += bytes_sent
- return bytes_sent
-
- def flush(self):
- if self._wbuf:
- buffer = "".join(self._wbuf)
- self._wbuf = []
- self.sendall(buffer)
-
- def recv(self, size):
- while True:
- try:
- data = self._sock.recv(size)
- self.bytes_read += len(data)
- return data
- except socket.error, e:
- if (e.args[0] not in socket_errors_nonblocking
- and e.args[0] not in socket_error_eintr):
- raise
-
- if not _fileobject_uses_str_type:
- def read(self, size=-1):
- # Use max, disallow tiny reads in a loop as they are very inefficient.
- # We never leave read() with any leftover data from a new recv() call
- # in our internal buffer.
- rbufsize = max(self._rbufsize, self.default_bufsize)
- # Our use of StringIO rather than lists of string objects returned by
- # recv() minimizes memory usage and fragmentation that occurs when
- # rbufsize is large compared to the typical return value of recv().
- buf = self._rbuf
- buf.seek(0, 2) # seek end
- if size < 0:
- # Read until EOF
- self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
- while True:
- data = self.recv(rbufsize)
- if not data:
- break
- buf.write(data)
- return buf.getvalue()
- else:
- # Read until size bytes or EOF seen, whichever comes first
- buf_len = buf.tell()
- if buf_len >= size:
- # Already have size bytes in our buffer? Extract and return.
- buf.seek(0)
- rv = buf.read(size)
- self._rbuf = StringIO.StringIO()
- self._rbuf.write(buf.read())
- return rv
-
- self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
- while True:
- left = size - buf_len
- # recv() will malloc the amount of memory given as its
- # parameter even though it often returns much less data
- # than that. The returned data string is short lived
- # as we copy it into a StringIO and free it. This avoids
- # fragmentation issues on many platforms.
- data = self.recv(left)
- if not data:
- break
- n = len(data)
- if n == size and not buf_len:
- # Shortcut. Avoid buffer data copies when:
- # - We have no data in our buffer.
- # AND
- # - Our call to recv returned exactly the
- # number of bytes we were asked to read.
- return data
- if n == left:
- buf.write(data)
- del data # explicit free
- break
- assert n <= left, "recv(%d) returned %d bytes" % (left, n)
- buf.write(data)
- buf_len += n
- del data # explicit free
- #assert buf_len == buf.tell()
- return buf.getvalue()
-
- def readline(self, size=-1):
- buf = self._rbuf
- buf.seek(0, 2) # seek end
- if buf.tell() > 0:
- # check if we already have it in our buffer
- buf.seek(0)
- bline = buf.readline(size)
- if bline.endswith('\n') or len(bline) == size:
- self._rbuf = StringIO.StringIO()
- self._rbuf.write(buf.read())
- return bline
- del bline
- if size < 0:
- # Read until \n or EOF, whichever comes first
- if self._rbufsize <= 1:
- # Speed up unbuffered case
- buf.seek(0)
- buffers = [buf.read()]
- self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
- data = None
- recv = self.recv
- while data != "\n":
- data = recv(1)
- if not data:
- break
- buffers.append(data)
- return "".join(buffers)
-
- buf.seek(0, 2) # seek end
- self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
- while True:
- data = self.recv(self._rbufsize)
- if not data:
- break
- nl = data.find('\n')
- if nl >= 0:
- nl += 1
- buf.write(data[:nl])
- self._rbuf.write(data[nl:])
- del data
- break
- buf.write(data)
- return buf.getvalue()
- else:
- # Read until size bytes or \n or EOF seen, whichever comes first
- buf.seek(0, 2) # seek end
- buf_len = buf.tell()
- if buf_len >= size:
- buf.seek(0)
- rv = buf.read(size)
- self._rbuf = StringIO.StringIO()
- self._rbuf.write(buf.read())
- return rv
- self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
- while True:
- data = self.recv(self._rbufsize)
- if not data:
- break
- left = size - buf_len
- # did we just receive a newline?
- nl = data.find('\n', 0, left)
- if nl >= 0:
- nl += 1
- # save the excess data to _rbuf
- self._rbuf.write(data[nl:])
- if buf_len:
- buf.write(data[:nl])
- break
- else:
- # Shortcut. Avoid data copy through buf when returning
- # a substring of our first recv().
- return data[:nl]
- n = len(data)
- if n == size and not buf_len:
- # Shortcut. Avoid data copy through buf when
- # returning exactly all of our first recv().
- return data
- if n >= left:
- buf.write(data[:left])
- self._rbuf.write(data[left:])
- break
- buf.write(data)
- buf_len += n
- #assert buf_len == buf.tell()
- return buf.getvalue()
- else:
- def read(self, size=-1):
- if size < 0:
- # Read until EOF
- buffers = [self._rbuf]
- self._rbuf = ""
- if self._rbufsize <= 1:
- recv_size = self.default_bufsize
- else:
- recv_size = self._rbufsize
-
- while True:
- data = self.recv(recv_size)
- if not data:
- break
- buffers.append(data)
- return "".join(buffers)
- else:
- # Read until size bytes or EOF seen, whichever comes first
- data = self._rbuf
- buf_len = len(data)
- if buf_len >= size:
- self._rbuf = data[size:]
- return data[:size]
- buffers = []
- if data:
- buffers.append(data)
- self._rbuf = ""
- while True:
- left = size - buf_len
- recv_size = max(self._rbufsize, left)
- data = self.recv(recv_size)
- if not data:
- break
- buffers.append(data)
- n = len(data)
- if n >= left:
- self._rbuf = data[left:]
- buffers[-1] = data[:left]
- break
- buf_len += n
- return "".join(buffers)
-
- def readline(self, size=-1):
- data = self._rbuf
- if size < 0:
- # Read until \n or EOF, whichever comes first
- if self._rbufsize <= 1:
- # Speed up unbuffered case
- assert data == ""
- buffers = []
- while data != "\n":
- data = self.recv(1)
- if not data:
- break
- buffers.append(data)
- return "".join(buffers)
- nl = data.find('\n')
- if nl >= 0:
- nl += 1
- self._rbuf = data[nl:]
- return data[:nl]
- buffers = []
- if data:
- buffers.append(data)
- self._rbuf = ""
- while True:
- data = self.recv(self._rbufsize)
- if not data:
- break
- buffers.append(data)
- nl = data.find('\n')
- if nl >= 0:
- nl += 1
- self._rbuf = data[nl:]
- buffers[-1] = data[:nl]
- break
- return "".join(buffers)
- else:
- # Read until size bytes or \n or EOF seen, whichever comes first
- nl = data.find('\n', 0, size)
- if nl >= 0:
- nl += 1
- self._rbuf = data[nl:]
- return data[:nl]
- buf_len = len(data)
- if buf_len >= size:
- self._rbuf = data[size:]
- return data[:size]
- buffers = []
- if data:
- buffers.append(data)
- self._rbuf = ""
- while True:
- data = self.recv(self._rbufsize)
- if not data:
- break
- buffers.append(data)
- left = size - buf_len
- nl = data.find('\n', 0, left)
- if nl >= 0:
- nl += 1
- self._rbuf = data[nl:]
- buffers[-1] = data[:nl]
- break
- n = len(data)
- if n >= left:
- self._rbuf = data[left:]
- buffers[-1] = data[:left]
- break
- buf_len += n
- return "".join(buffers)
-
-
-class HTTPConnection(object):
- """An HTTP connection (active socket).
-
- server: the Server object which received this connection.
- socket: the raw socket object (usually TCP) for this connection.
- makefile: a fileobject class for reading from the socket.
- """
-
- remote_addr = None
- remote_port = None
- ssl_env = None
- rbufsize = DEFAULT_BUFFER_SIZE
- wbufsize = DEFAULT_BUFFER_SIZE
- RequestHandlerClass = HTTPRequest
-
- def __init__(self, server, sock, makefile=CP_fileobject):
- self.server = server
- self.socket = sock
- self.rfile = makefile(sock, "rb", self.rbufsize)
- self.wfile = makefile(sock, "wb", self.wbufsize)
- self.requests_seen = 0
-
- def communicate(self):
- """Read each request and respond appropriately."""
- request_seen = False
- try:
- while True:
- # (re)set req to None so that if something goes wrong in
- # the RequestHandlerClass constructor, the error doesn't
- # get written to the previous request.
- req = None
- req = self.RequestHandlerClass(self.server, self)
-
- # This order of operations should guarantee correct pipelining.
- req.parse_request()
- if self.server.stats['Enabled']:
- self.requests_seen += 1
- if not req.ready:
- # Something went wrong in the parsing (and the server has
- # probably already made a simple_response). Return and
- # let the conn close.
- return
-
- request_seen = True
- req.respond()
- if req.close_connection:
- return
- except socket.error:
- e = sys.exc_info()[1]
- errnum = e.args[0]
- # sadly SSL sockets return a different (longer) time out string
- if errnum == 'timed out' or errnum == 'The read operation timed out':
- # Don't error if we're between requests; only error
- # if 1) no request has been started at all, or 2) we're
- # in the middle of a request.
- # See http://www.cherrypy.org/ticket/853
- if (not request_seen) or (req and req.started_request):
- # Don't bother writing the 408 if the response
- # has already started being written.
- if req and not req.sent_headers:
- try:
- req.simple_response("408 Request Timeout")
- except FatalSSLAlert:
- # Close the connection.
- return
- elif errnum not in socket_errors_to_ignore:
- self.server.error_log("socket.error %s" % repr(errnum),
- level=logging.WARNING, traceback=True)
- if req and not req.sent_headers:
- try:
- req.simple_response("500 Internal Server Error")
- except FatalSSLAlert:
- # Close the connection.
- return
- return
- except (KeyboardInterrupt, SystemExit):
- raise
- except FatalSSLAlert:
- # Close the connection.
- return
- except NoSSLError:
- if req and not req.sent_headers:
- # Unwrap our wfile
- self.wfile = CP_fileobject(self.socket._sock, "wb", self.wbufsize)
- req.simple_response("400 Bad Request",
- "The client sent a plain HTTP request, but "
- "this server only speaks HTTPS on this port.")
- self.linger = True
- except Exception:
- e = sys.exc_info()[1]
- self.server.error_log(repr(e), level=logging.ERROR, traceback=True)
- if req and not req.sent_headers:
- try:
- req.simple_response("500 Internal Server Error")
- except FatalSSLAlert:
- # Close the connection.
- return
-
- linger = False
-
- def close(self):
- """Close the socket underlying this connection."""
- self.rfile.close()
-
- if not self.linger:
- # Python's socket module does NOT call close on the kernel socket
- # when you call socket.close(). We do so manually here because we
- # want this server to send a FIN TCP segment immediately. Note this
- # must be called *before* calling socket.close(), because the latter
- # drops its reference to the kernel socket.
- if hasattr(self.socket, '_sock'):
- self.socket._sock.close()
- self.socket.close()
- else:
- # On the other hand, sometimes we want to hang around for a bit
- # to make sure the client has a chance to read our entire
- # response. Skipping the close() calls here delays the FIN
- # packet until the socket object is garbage-collected later.
- # Someday, perhaps, we'll do the full lingering_close that
- # Apache does, but not today.
- pass
-
-
-class TrueyZero(object):
- """An object which equals and does math like the integer '0' but evals True."""
- def __add__(self, other):
- return other
- def __radd__(self, other):
- return other
-trueyzero = TrueyZero()
-
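-# Illustrative example (not part of the original module): trueyzero lets
-# the worker-thread stats lambdas below fall through to live counters
-# without the boolean 'and/or' idiom short-circuiting on a falsy 0:
-#
-#   >>> trueyzero + 5, 5 + trueyzero, bool(trueyzero)
-#   (5, 5, True)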
-
-_SHUTDOWNREQUEST = None
-
-class WorkerThread(threading.Thread):
- """Thread which continuously polls a Queue for Connection objects.
-
- Due to the timing issues of polling a Queue, a WorkerThread does not
- check its own 'ready' flag after it has started. To stop the thread,
- it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
- (one for each running WorkerThread).
- """
-
- conn = None
- """The current connection pulled off the Queue, or None."""
-
- server = None
- """The HTTP Server which spawned this thread, and which owns the
- Queue and is placing active connections into it."""
-
- ready = False
- """A simple flag for the calling server to know when this thread
- has begun polling the Queue."""
-
-
- def __init__(self, server):
- self.ready = False
- self.server = server
-
- self.requests_seen = 0
- self.bytes_read = 0
- self.bytes_written = 0
- self.start_time = None
- self.work_time = 0
- self.stats = {
- 'Requests': lambda s: self.requests_seen + ((self.start_time is None) and trueyzero or self.conn.requests_seen),
- 'Bytes Read': lambda s: self.bytes_read + ((self.start_time is None) and trueyzero or self.conn.rfile.bytes_read),
- 'Bytes Written': lambda s: self.bytes_written + ((self.start_time is None) and trueyzero or self.conn.wfile.bytes_written),
- 'Work Time': lambda s: self.work_time + ((self.start_time is None) and trueyzero or time.time() - self.start_time),
- 'Read Throughput': lambda s: s['Bytes Read'](s) / (s['Work Time'](s) or 1e-6),
- 'Write Throughput': lambda s: s['Bytes Written'](s) / (s['Work Time'](s) or 1e-6),
- }
- threading.Thread.__init__(self)
-
- def run(self):
- self.server.stats['Worker Threads'][self.getName()] = self.stats
- try:
- self.ready = True
- while True:
- conn = self.server.requests.get()
- if conn is _SHUTDOWNREQUEST:
- return
-
- self.conn = conn
- if self.server.stats['Enabled']:
- self.start_time = time.time()
- try:
- conn.communicate()
- finally:
- conn.close()
- if self.server.stats['Enabled']:
- self.requests_seen += self.conn.requests_seen
- self.bytes_read += self.conn.rfile.bytes_read
- self.bytes_written += self.conn.wfile.bytes_written
- self.work_time += time.time() - self.start_time
- self.start_time = None
- self.conn = None
- except (KeyboardInterrupt, SystemExit):
- exc = sys.exc_info()[1]
- self.server.interrupt = exc
-
-
-class ThreadPool(object):
- """A Request Queue for an HTTPServer which pools threads.
-
- ThreadPool objects must provide min, get(), put(obj), start()
- and stop(timeout) attributes.
- """
-
- def __init__(self, server, min=10, max=-1):
- self.server = server
- self.min = min
- self.max = max
- self._threads = []
- self._queue = queue.Queue()
- self.get = self._queue.get
-
- def start(self):
- """Start the pool of threads."""
- for i in range(self.min):
- self._threads.append(WorkerThread(self.server))
- for worker in self._threads:
- worker.setName("CP Server " + worker.getName())
- worker.start()
- for worker in self._threads:
- while not worker.ready:
- time.sleep(.1)
-
- def _get_idle(self):
- """Number of worker threads which are idle. Read-only."""
- return len([t for t in self._threads if t.conn is None])
- idle = property(_get_idle, doc=_get_idle.__doc__)
-
- def put(self, obj):
- self._queue.put(obj)
- if obj is _SHUTDOWNREQUEST:
- return
-
- def grow(self, amount):
- """Spawn new worker threads (not above self.max)."""
- for i in range(amount):
- if self.max > 0 and len(self._threads) >= self.max:
- break
- worker = WorkerThread(self.server)
- worker.setName("CP Server " + worker.getName())
- self._threads.append(worker)
- worker.start()
-
- def shrink(self, amount):
- """Kill off worker threads (not below self.min)."""
- # Grow/shrink the pool if necessary.
- # Remove any dead threads from our list
- for t in self._threads:
- if not t.isAlive():
- self._threads.remove(t)
- amount -= 1
-
-        if amount > 0:
-            # Put one shutdown request on the queue per thread we intend
-            # to remove (never dropping below self.min). Each worker that
-            # processes one terminates; the next shrink() call sweeps the
-            # dead thread out of self._threads above.
-            for i in range(min(amount, len(self._threads) - self.min)):
-                self._queue.put(_SHUTDOWNREQUEST)
-
- def stop(self, timeout=5):
- # Must shut down threads here so the code that calls
- # this method can know when all threads are stopped.
- for worker in self._threads:
- self._queue.put(_SHUTDOWNREQUEST)
-
- # Don't join currentThread (when stop is called inside a request).
- current = threading.currentThread()
- if timeout and timeout >= 0:
- endtime = time.time() + timeout
- while self._threads:
- worker = self._threads.pop()
- if worker is not current and worker.isAlive():
- try:
- if timeout is None or timeout < 0:
- worker.join()
- else:
- remaining_time = endtime - time.time()
- if remaining_time > 0:
- worker.join(remaining_time)
- if worker.isAlive():
- # We exhausted the timeout.
- # Forcibly shut down the socket.
- c = worker.conn
- if c and not c.rfile.closed:
- try:
- c.socket.shutdown(socket.SHUT_RD)
- except TypeError:
- # pyOpenSSL sockets don't take an arg
- c.socket.shutdown()
- worker.join()
- except (AssertionError,
- # Ignore repeated Ctrl-C.
- # See http://www.cherrypy.org/ticket/691.
- KeyboardInterrupt):
- pass
-
- def _get_qsize(self):
- return self._queue.qsize()
- qsize = property(_get_qsize)
-
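-# Illustrative usage sketch (not part of the original module; 'server' and
-# 'conn' stand in for an HTTPServer and an HTTPConnection):
-#
-#   pool = ThreadPool(server, min=10, max=30)
-#   pool.start()          # spawn workers and wait until each is ready
-#   pool.put(conn)        # an idle worker calls conn.communicate()
-#   pool.stop(timeout=5)  # enqueue _SHUTDOWNREQUESTs and join workers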
-
-
-try:
- import fcntl
-except ImportError:
- try:
- from ctypes import windll, WinError
- except ImportError:
- def prevent_socket_inheritance(sock):
- """Dummy function, since neither fcntl nor ctypes are available."""
- pass
- else:
- def prevent_socket_inheritance(sock):
- """Mark the given socket fd as non-inheritable (Windows)."""
- if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
- raise WinError()
-else:
- def prevent_socket_inheritance(sock):
- """Mark the given socket fd as non-inheritable (POSIX)."""
- fd = sock.fileno()
- old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
- fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
-
-
-class SSLAdapter(object):
- """Base class for SSL driver library adapters.
-
- Required methods:
-
- * ``wrap(sock) -> (wrapped socket, ssl environ dict)``
- * ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) -> socket file object``
- """
-
- def __init__(self, certificate, private_key, certificate_chain=None):
- self.certificate = certificate
- self.private_key = private_key
- self.certificate_chain = certificate_chain
-
- def wrap(self, sock):
-        raise NotImplementedError
-
- def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
-        raise NotImplementedError
-
-
-class HTTPServer(object):
- """An HTTP server."""
-
- _bind_addr = "127.0.0.1"
- _interrupt = None
-
- gateway = None
- """A Gateway instance."""
-
- minthreads = None
- """The minimum number of worker threads to create (default 10)."""
-
- maxthreads = None
- """The maximum number of worker threads to create (default -1 = no limit)."""
-
- server_name = None
- """The name of the server; defaults to socket.gethostname()."""
-
- protocol = "HTTP/1.1"
- """The version string to write in the Status-Line of all HTTP responses.
-
- For example, "HTTP/1.1" is the default. This also limits the supported
- features used in the response."""
-
- request_queue_size = 5
- """The 'backlog' arg to socket.listen(); max queued connections (default 5)."""
-
- shutdown_timeout = 5
- """The total time, in seconds, to wait for worker threads to cleanly exit."""
-
- timeout = 10
- """The timeout in seconds for accepted connections (default 10)."""
-
- version = "CherryPy/3.2.2"
- """A version string for the HTTPServer."""
-
- software = None
- """The value to set for the SERVER_SOFTWARE entry in the WSGI environ.
-
- If None, this defaults to ``'%s Server' % self.version``."""
-
- ready = False
- """An internal flag which marks whether the socket is accepting connections."""
-
- max_request_header_size = 0
- """The maximum size, in bytes, for request headers, or 0 for no limit."""
-
- max_request_body_size = 0
- """The maximum size, in bytes, for request bodies, or 0 for no limit."""
-
- nodelay = True
- """If True (the default since 3.1), sets the TCP_NODELAY socket option."""
-
- ConnectionClass = HTTPConnection
- """The class to use for handling HTTP connections."""
-
- ssl_adapter = None
- """An instance of SSLAdapter (or a subclass).
-
- You must have the corresponding SSL driver library installed."""
-
- def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1,
- server_name=None):
- self.bind_addr = bind_addr
- self.gateway = gateway
-
- self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads)
-
- if not server_name:
- server_name = socket.gethostname()
- self.server_name = server_name
- self.clear_stats()
-
- def clear_stats(self):
- self._start_time = None
- self._run_time = 0
- self.stats = {
- 'Enabled': False,
- 'Bind Address': lambda s: repr(self.bind_addr),
- 'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(),
- 'Accepts': 0,
- 'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
- 'Queue': lambda s: getattr(self.requests, "qsize", None),
- 'Threads': lambda s: len(getattr(self.requests, "_threads", [])),
- 'Threads Idle': lambda s: getattr(self.requests, "idle", None),
- 'Socket Errors': 0,
- 'Requests': lambda s: (not s['Enabled']) and -1 or sum([w['Requests'](w) for w
- in s['Worker Threads'].values()], 0),
- 'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum([w['Bytes Read'](w) for w
- in s['Worker Threads'].values()], 0),
- 'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum([w['Bytes Written'](w) for w
- in s['Worker Threads'].values()], 0),
- 'Work Time': lambda s: (not s['Enabled']) and -1 or sum([w['Work Time'](w) for w
- in s['Worker Threads'].values()], 0),
- 'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum(
- [w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
- for w in s['Worker Threads'].values()], 0),
- 'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum(
- [w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
- for w in s['Worker Threads'].values()], 0),
- 'Worker Threads': {},
- }
- logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats
-
- def runtime(self):
- if self._start_time is None:
- return self._run_time
- else:
- return self._run_time + (time.time() - self._start_time)
-
- def __str__(self):
- return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
- self.bind_addr)
-
- def _get_bind_addr(self):
- return self._bind_addr
- def _set_bind_addr(self, value):
- if isinstance(value, tuple) and value[0] in ('', None):
- # Despite the socket module docs, using '' does not
- # allow AI_PASSIVE to work. Passing None instead
- # returns '0.0.0.0' like we want. In other words:
- # host AI_PASSIVE result
- # '' Y 192.168.x.y
- # '' N 192.168.x.y
- # None Y 0.0.0.0
- # None N 127.0.0.1
- # But since you can get the same effect with an explicit
- # '0.0.0.0', we deny both the empty string and None as values.
- raise ValueError("Host values of '' or None are not allowed. "
- "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
- "to listen on all active interfaces.")
- self._bind_addr = value
- bind_addr = property(_get_bind_addr, _set_bind_addr,
- doc="""The interface on which to listen for connections.
-
- For TCP sockets, a (host, port) tuple. Host values may be any IPv4
- or IPv6 address, or any valid hostname. The string 'localhost' is a
- synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
- The string '0.0.0.0' is a special IPv4 entry meaning "any active
- interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
- IPv6. The empty string or None are not allowed.
-
- For UNIX sockets, supply the filename as a string.""")
-
- def start(self):
- """Run the server forever."""
- # We don't have to trap KeyboardInterrupt or SystemExit here,
-        # because cherrypy.server already does so, calling self.stop() for us.
- # If you're using this server with another framework, you should
- # trap those exceptions in whatever code block calls start().
- self._interrupt = None
-
- if self.software is None:
- self.software = "%s Server" % self.version
-
- # SSL backward compatibility
- if (self.ssl_adapter is None and
- getattr(self, 'ssl_certificate', None) and
- getattr(self, 'ssl_private_key', None)):
- warnings.warn(
- "SSL attributes are deprecated in CherryPy 3.2, and will "
- "be removed in CherryPy 3.3. Use an ssl_adapter attribute "
- "instead.",
- DeprecationWarning
- )
- try:
- from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
- except ImportError:
- pass
- else:
- self.ssl_adapter = pyOpenSSLAdapter(
- self.ssl_certificate, self.ssl_private_key,
- getattr(self, 'ssl_certificate_chain', None))
-
- # Select the appropriate socket
- if isinstance(self.bind_addr, basestring):
- # AF_UNIX socket
-
- # So we can reuse the socket...
-            try: os.unlink(self.bind_addr)
-            except OSError: pass
-
-            # So everyone can access the socket...
-            try: os.chmod(self.bind_addr, 511) # 0777
-            except OSError: pass
-
- info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
- else:
- # AF_INET or AF_INET6 socket
- # Get the correct address family for our host (allows IPv6 addresses)
- host, port = self.bind_addr
- try:
- info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
- socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
- except socket.gaierror:
- if ':' in self.bind_addr[0]:
- info = [(socket.AF_INET6, socket.SOCK_STREAM,
- 0, "", self.bind_addr + (0, 0))]
- else:
- info = [(socket.AF_INET, socket.SOCK_STREAM,
- 0, "", self.bind_addr)]
-
- self.socket = None
- msg = "No socket could be created"
- for res in info:
- af, socktype, proto, canonname, sa = res
- try:
- self.bind(af, socktype, proto)
- except socket.error:
- if self.socket:
- self.socket.close()
- self.socket = None
- continue
- break
- if not self.socket:
- raise socket.error(msg)
-
- # Timeout so KeyboardInterrupt can be caught on Win32
- self.socket.settimeout(1)
- self.socket.listen(self.request_queue_size)
-
- # Create worker threads
- self.requests.start()
-
- self.ready = True
- self._start_time = time.time()
- while self.ready:
- try:
- self.tick()
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- self.error_log("Error in HTTPServer.tick", level=logging.ERROR,
- traceback=True)
-
- if self.interrupt:
- while self.interrupt is True:
- # Wait for self.stop() to complete. See _set_interrupt.
- time.sleep(0.1)
- if self.interrupt:
- raise self.interrupt
-
- def error_log(self, msg="", level=20, traceback=False):
- # Override this in subclasses as desired
- sys.stderr.write(msg + '\n')
- sys.stderr.flush()
- if traceback:
- tblines = format_exc()
- sys.stderr.write(tblines)
- sys.stderr.flush()
-
- def bind(self, family, type, proto=0):
- """Create (or recreate) the actual socket object."""
- self.socket = socket.socket(family, type, proto)
- prevent_socket_inheritance(self.socket)
- self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- if self.nodelay and not isinstance(self.bind_addr, str):
- self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-
- if self.ssl_adapter is not None:
- self.socket = self.ssl_adapter.bind(self.socket)
-
- # If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
- # activate dual-stack. See http://www.cherrypy.org/ticket/871.
- if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6
- and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')):
- try:
- self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
- except (AttributeError, socket.error):
- # Apparently, the socket option is not available in
- # this machine's TCP stack
- pass
-
- self.socket.bind(self.bind_addr)
-
- def tick(self):
- """Accept a new connection and put it on the Queue."""
- try:
- s, addr = self.socket.accept()
- if self.stats['Enabled']:
- self.stats['Accepts'] += 1
- if not self.ready:
- return
-
- prevent_socket_inheritance(s)
- if hasattr(s, 'settimeout'):
- s.settimeout(self.timeout)
-
- makefile = CP_fileobject
- ssl_env = {}
- # if ssl cert and key are set, we try to be a secure HTTP server
- if self.ssl_adapter is not None:
- try:
- s, ssl_env = self.ssl_adapter.wrap(s)
- except NoSSLError:
- msg = ("The client sent a plain HTTP request, but "
- "this server only speaks HTTPS on this port.")
- buf = ["%s 400 Bad Request\r\n" % self.protocol,
- "Content-Length: %s\r\n" % len(msg),
- "Content-Type: text/plain\r\n\r\n",
- msg]
-
- wfile = makefile(s, "wb", DEFAULT_BUFFER_SIZE)
- try:
- wfile.sendall("".join(buf))
- except socket.error:
- x = sys.exc_info()[1]
- if x.args[0] not in socket_errors_to_ignore:
- raise
- return
- if not s:
- return
- makefile = self.ssl_adapter.makefile
- # Re-apply our timeout since we may have a new socket object
- if hasattr(s, 'settimeout'):
- s.settimeout(self.timeout)
-
- conn = self.ConnectionClass(self, s, makefile)
-
- if not isinstance(self.bind_addr, basestring):
- # optional values
- # Until we do DNS lookups, omit REMOTE_HOST
- if addr is None: # sometimes this can happen
- # figure out if AF_INET or AF_INET6.
- if len(s.getsockname()) == 2:
- # AF_INET
- addr = ('0.0.0.0', 0)
- else:
- # AF_INET6
- addr = ('::', 0)
- conn.remote_addr = addr[0]
- conn.remote_port = addr[1]
-
- conn.ssl_env = ssl_env
-
- self.requests.put(conn)
- except socket.timeout:
- # The only reason for the timeout in start() is so we can
- # notice keyboard interrupts on Win32, which don't interrupt
- # accept() by default
- return
- except socket.error:
- x = sys.exc_info()[1]
- if self.stats['Enabled']:
- self.stats['Socket Errors'] += 1
- if x.args[0] in socket_error_eintr:
- # I *think* this is right. EINTR should occur when a signal
- # is received during the accept() call; all docs say retry
- # the call, and I *think* I'm reading it right that Python
- # will then go ahead and poll for and handle the signal
- # elsewhere. See http://www.cherrypy.org/ticket/707.
- return
- if x.args[0] in socket_errors_nonblocking:
- # Just try again. See http://www.cherrypy.org/ticket/479.
- return
- if x.args[0] in socket_errors_to_ignore:
- # Our socket was closed.
- # See http://www.cherrypy.org/ticket/686.
- return
- raise
-
- def _get_interrupt(self):
- return self._interrupt
- def _set_interrupt(self, interrupt):
- self._interrupt = True
- self.stop()
- self._interrupt = interrupt
- interrupt = property(_get_interrupt, _set_interrupt,
- doc="Set this to an Exception instance to "
- "interrupt the server.")
-
- def stop(self):
- """Gracefully shutdown a server that is serving forever."""
- self.ready = False
- if self._start_time is not None:
- self._run_time += (time.time() - self._start_time)
- self._start_time = None
-
- sock = getattr(self, "socket", None)
- if sock:
- if not isinstance(self.bind_addr, basestring):
- # Touch our own socket to make accept() return immediately.
- try:
- host, port = sock.getsockname()[:2]
- except socket.error:
- x = sys.exc_info()[1]
- if x.args[0] not in socket_errors_to_ignore:
- # Changed to use error code and not message
- # See http://www.cherrypy.org/ticket/860.
- raise
- else:
- # Note that we're explicitly NOT using AI_PASSIVE,
- # here, because we want an actual IP to touch.
- # localhost won't work if we've bound to a public IP,
- # but it will if we bound to '0.0.0.0' (INADDR_ANY).
- for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
- socket.SOCK_STREAM):
- af, socktype, proto, canonname, sa = res
- s = None
- try:
- s = socket.socket(af, socktype, proto)
- # See http://groups.google.com/group/cherrypy-users/
- # browse_frm/thread/bbfe5eb39c904fe0
- s.settimeout(1.0)
- s.connect((host, port))
- s.close()
- except socket.error:
- if s:
- s.close()
- if hasattr(sock, "close"):
- sock.close()
- self.socket = None
-
- self.requests.stop(self.shutdown_timeout)
-
-
-class Gateway(object):
- """A base class to interface HTTPServer with other systems, such as WSGI."""
-
- def __init__(self, req):
- self.req = req
-
- def respond(self):
- """Process the current request. Must be overridden in a subclass."""
-        raise NotImplementedError
-
-
-# These may either be wsgiserver.SSLAdapter subclasses or the string names
-# of such classes (in which case they will be lazily loaded).
-ssl_adapters = {
- 'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
- 'pyopenssl': 'cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',
- }
-
-def get_ssl_adapter_class(name='pyopenssl'):
- """Return an SSL adapter class for the given name."""
- adapter = ssl_adapters[name.lower()]
- if isinstance(adapter, basestring):
- last_dot = adapter.rfind(".")
- attr_name = adapter[last_dot + 1:]
- mod_path = adapter[:last_dot]
-
- try:
- mod = sys.modules[mod_path]
- if mod is None:
- raise KeyError()
- except KeyError:
- # The last [''] is important.
- mod = __import__(mod_path, globals(), locals(), [''])
-
- # Let an AttributeError propagate outward.
- try:
- adapter = getattr(mod, attr_name)
- except AttributeError:
- raise AttributeError("'%s' object has no attribute '%s'"
- % (mod_path, attr_name))
-
- return adapter
-
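-# Illustrative usage (not part of the original module; the .pem paths are
-# placeholders):
-#
-#   cls = get_ssl_adapter_class('builtin')
-#   server.ssl_adapter = cls('cert.pem', 'privkey.pem')
-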
-# -------------------------------- WSGI Stuff -------------------------------- #
-
-
-class CherryPyWSGIServer(HTTPServer):
- """A subclass of HTTPServer which calls a WSGI application."""
-
- wsgi_version = (1, 0)
- """The version of WSGI to produce."""
-
- def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
- max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
- self.requests = ThreadPool(self, min=numthreads or 1, max=max)
- self.wsgi_app = wsgi_app
- self.gateway = wsgi_gateways[self.wsgi_version]
-
- self.bind_addr = bind_addr
- if not server_name:
- server_name = socket.gethostname()
- self.server_name = server_name
- self.request_queue_size = request_queue_size
-
- self.timeout = timeout
- self.shutdown_timeout = shutdown_timeout
- self.clear_stats()
-
- def _get_numthreads(self):
- return self.requests.min
- def _set_numthreads(self, value):
- self.requests.min = value
- numthreads = property(_get_numthreads, _set_numthreads)
-
-
-class WSGIGateway(Gateway):
- """A base class to interface HTTPServer with WSGI."""
-
- def __init__(self, req):
- self.req = req
- self.started_response = False
- self.env = self.get_environ()
- self.remaining_bytes_out = None
-
- def get_environ(self):
- """Return a new environ dict targeting the given wsgi.version"""
-        raise NotImplementedError
-
- def respond(self):
- """Process the current request."""
- response = self.req.server.wsgi_app(self.env, self.start_response)
- try:
- for chunk in response:
- # "The start_response callable must not actually transmit
- # the response headers. Instead, it must store them for the
- # server or gateway to transmit only after the first
- # iteration of the application return value that yields
- # a NON-EMPTY string, or upon the application's first
- # invocation of the write() callable." (PEP 333)
- if chunk:
- if isinstance(chunk, unicodestr):
- chunk = chunk.encode('ISO-8859-1')
- self.write(chunk)
- finally:
- if hasattr(response, "close"):
- response.close()
-
-    def start_response(self, status, headers, exc_info=None):
- """WSGI callable to begin the HTTP response."""
- # "The application may call start_response more than once,
- # if and only if the exc_info argument is provided."
- if self.started_response and not exc_info:
- raise AssertionError("WSGI start_response called a second "
- "time with no exc_info.")
- self.started_response = True
-
- # "if exc_info is provided, and the HTTP headers have already been
- # sent, start_response must raise an error, and should raise the
- # exc_info tuple."
- if self.req.sent_headers:
- try:
- raise exc_info[0], exc_info[1], exc_info[2]
- finally:
- exc_info = None
-
- self.req.status = status
- for k, v in headers:
- if not isinstance(k, str):
- raise TypeError("WSGI response header key %r is not of type str." % k)
- if not isinstance(v, str):
- raise TypeError("WSGI response header value %r is not of type str." % v)
- if k.lower() == 'content-length':
- self.remaining_bytes_out = int(v)
- self.req.outheaders.extend(headers)
-
- return self.write
-
- def write(self, chunk):
- """WSGI callable to write unbuffered data to the client.
-
-        This method is also used internally by respond() (to write
-        data from the iterable returned by the WSGI application).
- """
- if not self.started_response:
- raise AssertionError("WSGI write called before start_response.")
-
- chunklen = len(chunk)
- rbo = self.remaining_bytes_out
- if rbo is not None and chunklen > rbo:
- if not self.req.sent_headers:
- # Whew. We can send a 500 to the client.
- self.req.simple_response("500 Internal Server Error",
- "The requested resource returned more bytes than the "
- "declared Content-Length.")
- else:
- # Dang. We have probably already sent data. Truncate the chunk
- # to fit (so the client doesn't hang) and raise an error later.
- chunk = chunk[:rbo]
-
- if not self.req.sent_headers:
- self.req.sent_headers = True
- self.req.send_headers()
-
- self.req.write(chunk)
-
- if rbo is not None:
- rbo -= chunklen
- if rbo < 0:
- raise ValueError(
- "Response body exceeds the declared Content-Length.")
-
-
-class WSGIGateway_10(WSGIGateway):
- """A Gateway class to interface HTTPServer with WSGI 1.0.x."""
-
- def get_environ(self):
- """Return a new environ dict targeting the given wsgi.version"""
- req = self.req
- env = {
- # set a non-standard environ entry so the WSGI app can know what
- # the *real* server protocol is (and what features to support).
- # See http://www.faqs.org/rfcs/rfc2145.html.
- 'ACTUAL_SERVER_PROTOCOL': req.server.protocol,
- 'PATH_INFO': req.path,
- 'QUERY_STRING': req.qs,
- 'REMOTE_ADDR': req.conn.remote_addr or '',
- 'REMOTE_PORT': str(req.conn.remote_port or ''),
- 'REQUEST_METHOD': req.method,
- 'REQUEST_URI': req.uri,
- 'SCRIPT_NAME': '',
- 'SERVER_NAME': req.server.server_name,
- # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
- 'SERVER_PROTOCOL': req.request_protocol,
- 'SERVER_SOFTWARE': req.server.software,
- 'wsgi.errors': sys.stderr,
- 'wsgi.input': req.rfile,
- 'wsgi.multiprocess': False,
- 'wsgi.multithread': True,
- 'wsgi.run_once': False,
- 'wsgi.url_scheme': req.scheme,
- 'wsgi.version': (1, 0),
- }
-
- if isinstance(req.server.bind_addr, basestring):
- # AF_UNIX. This isn't really allowed by WSGI, which doesn't
- # address unix domain sockets. But it's better than nothing.
- env["SERVER_PORT"] = ""
- else:
- env["SERVER_PORT"] = str(req.server.bind_addr[1])
-
- # Request headers
- for k, v in req.inheaders.iteritems():
- env["HTTP_" + k.upper().replace("-", "_")] = v
-
- # CONTENT_TYPE/CONTENT_LENGTH
- ct = env.pop("HTTP_CONTENT_TYPE", None)
- if ct is not None:
- env["CONTENT_TYPE"] = ct
- cl = env.pop("HTTP_CONTENT_LENGTH", None)
- if cl is not None:
- env["CONTENT_LENGTH"] = cl
-
- if req.conn.ssl_env:
- env.update(req.conn.ssl_env)
-
- return env
-
-
-class WSGIGateway_u0(WSGIGateway_10):
- """A Gateway class to interface HTTPServer with WSGI u.0.
-
- WSGI u.0 is an experimental protocol, which uses unicode for keys and values
- in both Python 2 and Python 3.
- """
-
- def get_environ(self):
- """Return a new environ dict targeting the given wsgi.version"""
- req = self.req
- env_10 = WSGIGateway_10.get_environ(self)
- env = dict([(k.decode('ISO-8859-1'), v) for k, v in env_10.iteritems()])
- env[u'wsgi.version'] = ('u', 0)
-
- # Request-URI
- env.setdefault(u'wsgi.url_encoding', u'utf-8')
- try:
- for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
- env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
- except UnicodeDecodeError:
- # Fall back to latin 1 so apps can transcode if needed.
- env[u'wsgi.url_encoding'] = u'ISO-8859-1'
- for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
- env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
-
- for k, v in sorted(env.items()):
- if isinstance(v, str) and k not in ('REQUEST_URI', 'wsgi.input'):
- env[k] = v.decode('ISO-8859-1')
-
- return env
-
-wsgi_gateways = {
- (1, 0): WSGIGateway_10,
- ('u', 0): WSGIGateway_u0,
-}
-
-class WSGIPathInfoDispatcher(object):
- """A WSGI dispatcher for dispatch based on the PATH_INFO.
-
- apps: a dict or list of (path_prefix, app) pairs.
- """
-
- def __init__(self, apps):
- try:
- apps = list(apps.items())
- except AttributeError:
- pass
-
- # Sort the apps by len(path), descending
-        apps.sort(key=lambda app: len(app[0]), reverse=True)
-
- # The path_prefix strings must start, but not end, with a slash.
- # Use "" instead of "/".
- self.apps = [(p.rstrip("/"), a) for p, a in apps]
-
- def __call__(self, environ, start_response):
- path = environ["PATH_INFO"] or "/"
- for p, app in self.apps:
- # The apps list should be sorted by length, descending.
- if path.startswith(p + "/") or path == p:
- environ = environ.copy()
- environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
- environ["PATH_INFO"] = path[len(p):]
- return app(environ, start_response)
-
- start_response('404 Not Found', [('Content-Type', 'text/plain'),
- ('Content-Length', '0')])
- return ['']
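-
-# A minimal dispatch sketch (with hypothetical root_app/blog_app WSGI
-# callables): given apps = {'/': root_app, '/blog': blog_app}, a request
-# for '/blog/post' matches the longest prefix first, so blog_app is called
-# with SCRIPT_NAME extended by '/blog' and PATH_INFO '/post', while
-# '/about' falls through to root_app unchanged.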
-
diff --git a/python-packages/cherrypy/wsgiserver/wsgiserver3.py b/python-packages/cherrypy/wsgiserver/wsgiserver3.py
deleted file mode 100644
index 62db5ffd3b..0000000000
--- a/python-packages/cherrypy/wsgiserver/wsgiserver3.py
+++ /dev/null
@@ -1,2040 +0,0 @@
-"""A high-speed, production ready, thread pooled, generic HTTP server.
-
-Simplest example on how to use this module directly
-(without using CherryPy's application machinery)::
-
- from cherrypy import wsgiserver
-
- def my_crazy_app(environ, start_response):
- status = '200 OK'
- response_headers = [('Content-type','text/plain')]
- start_response(status, response_headers)
- return ['Hello world!']
-
- server = wsgiserver.CherryPyWSGIServer(
- ('0.0.0.0', 8070), my_crazy_app,
- server_name='www.cherrypy.example')
- server.start()
-
-The CherryPy WSGI server can serve as many WSGI applications
-as you want in one instance by using a WSGIPathInfoDispatcher::
-
- d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
- server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
-
-Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
-
-This won't call the CherryPy engine (application side) at all, only the
-HTTP server, which is independent from the rest of CherryPy. Don't
-let the name "CherryPyWSGIServer" throw you; the name merely reflects
-its origin, not its coupling.
-
-For those of you wanting to understand internals of this module, here's the
-basic call flow. The server's listening thread runs a very tight loop,
-sticking incoming connections onto a Queue::
-
- server = CherryPyWSGIServer(...)
- server.start()
- while True:
- tick()
- # This blocks until a request comes in:
- child = socket.accept()
- conn = HTTPConnection(child, ...)
- server.requests.put(conn)
-
-Worker threads are kept in a pool and poll the Queue, popping off and then
-handling each connection in turn. Each connection can consist of an arbitrary
-number of requests and their responses, so we run a nested loop::
-
- while True:
- conn = server.requests.get()
- conn.communicate()
- -> while True:
- req = HTTPRequest(...)
- req.parse_request()
- -> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
- req.rfile.readline()
- read_headers(req.rfile, req.inheaders)
- req.respond()
- -> response = app(...)
- try:
- for chunk in response:
- if chunk:
- req.write(chunk)
- finally:
- if hasattr(response, "close"):
- response.close()
- if req.close_connection:
- return
-"""
-
-__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer',
- 'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
- 'CP_makefile',
- 'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert',
- 'WorkerThread', 'ThreadPool', 'SSLAdapter',
- 'CherryPyWSGIServer',
- 'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0',
- 'WSGIPathInfoDispatcher', 'get_ssl_adapter_class']
-
-import os
-try:
- import queue
-except ImportError:
- import Queue as queue
-import re
-import email.utils
-import socket
-import sys
-# Some Windows Python builds lack IPPROTO_IPV6; note that 'darwin' contains
-# 'win', so test the platform prefix rather than substring membership.
-if sys.platform.startswith(('win', 'cygwin')) and not hasattr(socket, 'IPPROTO_IPV6'):
-    socket.IPPROTO_IPV6 = 41
-if sys.version_info < (3, 1):
-    import io
-else:
-    # Deliberately the pure-Python implementation: CP_BufferedWriter below
-    # relies on _pyio.BufferedWriter internals (_write_buf, _write_lock).
-    import _pyio as io
-DEFAULT_BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE
-
-import threading
-import time
-from traceback import format_exc
-from urllib.parse import unquote
-from urllib.parse import urlparse
-from urllib.parse import scheme_chars
-import warnings
-
-if sys.version_info >= (3, 0):
- bytestr = bytes
- unicodestr = str
- basestring = (bytes, str)
- def ntob(n, encoding='ISO-8859-1'):
- """Return the given native string as a byte string in the given encoding."""
- # In Python 3, the native string type is unicode
- return n.encode(encoding)
-else:
- bytestr = str
- unicodestr = unicode
- basestring = basestring
- def ntob(n, encoding='ISO-8859-1'):
- """Return the given native string as a byte string in the given encoding."""
- # In Python 2, the native string type is bytes. Assume it's already
- # in the given encoding, which for ISO-8859-1 is almost always what
- # was intended.
- return n
-
-LF = ntob('\n')
-CRLF = ntob('\r\n')
-TAB = ntob('\t')
-SPACE = ntob(' ')
-COLON = ntob(':')
-SEMICOLON = ntob(';')
-EMPTY = ntob('')
-NUMBER_SIGN = ntob('#')
-QUESTION_MARK = ntob('?')
-ASTERISK = ntob('*')
-FORWARD_SLASH = ntob('/')
-quoted_slash = re.compile(ntob("(?i)%2F"))
-
-import errno
-
-def plat_specific_errors(*errnames):
- """Return error numbers for all errors in errnames on this platform.
-
- The 'errno' module contains different global constants depending on
- the specific platform (OS). This function will return the list of
- numeric values for a given list of potential names.
- """
- errno_names = dir(errno)
- nums = [getattr(errno, k) for k in errnames if k in errno_names]
- # de-dupe the list
- return list(dict.fromkeys(nums).keys())
-
-socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
-
-socket_errors_to_ignore = plat_specific_errors(
- "EPIPE",
- "EBADF", "WSAEBADF",
- "ENOTSOCK", "WSAENOTSOCK",
- "ETIMEDOUT", "WSAETIMEDOUT",
- "ECONNREFUSED", "WSAECONNREFUSED",
- "ECONNRESET", "WSAECONNRESET",
- "ECONNABORTED", "WSAECONNABORTED",
- "ENETRESET", "WSAENETRESET",
- "EHOSTDOWN", "EHOSTUNREACH",
- )
-socket_errors_to_ignore.append("timed out")
-socket_errors_to_ignore.append("The read operation timed out")
-
-socket_errors_nonblocking = plat_specific_errors(
- 'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
-
-comma_separated_headers = [ntob(h) for h in
- ['Accept', 'Accept-Charset', 'Accept-Encoding',
- 'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
- 'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
- 'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
- 'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
- 'WWW-Authenticate']]
-
-
-import logging
-if not hasattr(logging, 'statistics'): logging.statistics = {}
-
-
-def read_headers(rfile, hdict=None):
- """Read headers from the given stream into the given header dict.
-
- If hdict is None, a new header dict is created. Returns the populated
- header dict.
-
- Headers which are repeated are folded together using a comma if their
- specification so dictates.
-
- This function raises ValueError when the read bytes violate the HTTP spec.
- You should probably return "400 Bad Request" if this happens.
- """
- if hdict is None:
- hdict = {}
-
- while True:
- line = rfile.readline()
- if not line:
- # No more data--illegal end of headers
- raise ValueError("Illegal end of headers.")
-
- if line == CRLF:
- # Normal end of headers
- break
- if not line.endswith(CRLF):
- raise ValueError("HTTP requires CRLF terminators")
-
-        # Slice rather than index: indexing bytes yields an int on Python 3.
-        if line[:1] in (SPACE, TAB):
- # It's a continuation line.
- v = line.strip()
- else:
- try:
- k, v = line.split(COLON, 1)
- except ValueError:
- raise ValueError("Illegal header line.")
- # TODO: what about TE and WWW-Authenticate?
- k = k.strip().title()
- v = v.strip()
- hname = k
-
- if k in comma_separated_headers:
- existing = hdict.get(hname)
- if existing:
- v = b", ".join((existing, v))
- hdict[hname] = v
-
- return hdict
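-
-# A minimal sketch of the folding behavior, with io.BytesIO standing in
-# for the socket rfile:
-#
-#     >>> from io import BytesIO
-#     >>> raw = BytesIO(b"Accept: text/html\r\nAccept: text/plain\r\n\r\n")
-#     >>> read_headers(raw)
-#     {b'Accept': b'text/html, text/plain'}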
-
-
-class MaxSizeExceeded(Exception):
- pass
-
-class SizeCheckWrapper(object):
- """Wraps a file-like object, raising MaxSizeExceeded if too large."""
-
- def __init__(self, rfile, maxlen):
- self.rfile = rfile
- self.maxlen = maxlen
- self.bytes_read = 0
-
- def _check_length(self):
- if self.maxlen and self.bytes_read > self.maxlen:
- raise MaxSizeExceeded()
-
- def read(self, size=None):
- data = self.rfile.read(size)
- self.bytes_read += len(data)
- self._check_length()
- return data
-
- def readline(self, size=None):
- if size is not None:
- data = self.rfile.readline(size)
- self.bytes_read += len(data)
- self._check_length()
- return data
-
- # User didn't specify a size ...
- # We read the line in chunks to make sure it's not a 100MB line !
- res = []
- while True:
- data = self.rfile.readline(256)
- self.bytes_read += len(data)
- self._check_length()
- res.append(data)
- # See http://www.cherrypy.org/ticket/421
-            if len(data) < 256 or data[-1:] == LF:
- return EMPTY.join(res)
-
- def readlines(self, sizehint=0):
- # Shamelessly stolen from StringIO
- total = 0
- lines = []
- line = self.readline()
- while line:
- lines.append(line)
- total += len(line)
- if 0 < sizehint <= total:
- break
- line = self.readline()
- return lines
-
- def close(self):
- self.rfile.close()
-
- def __iter__(self):
- return self
-
- def __next__(self):
- data = next(self.rfile)
- self.bytes_read += len(data)
- self._check_length()
- return data
-
- def next(self):
- data = self.rfile.next()
- self.bytes_read += len(data)
- self._check_length()
- return data
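-
-# Sketch: the wrapper counts every byte it hands out and raises once the
-# running total passes maxlen (BytesIO standing in for the socket rfile):
-#
-#     >>> from io import BytesIO
-#     >>> SizeCheckWrapper(BytesIO(b"0123456789"), maxlen=4).read()
-#     Traceback (most recent call last):
-#       ...
-#     MaxSizeExceeded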
-
-
-class KnownLengthRFile(object):
- """Wraps a file-like object, returning an empty string when exhausted."""
-
- def __init__(self, rfile, content_length):
- self.rfile = rfile
- self.remaining = content_length
-
- def read(self, size=None):
- if self.remaining == 0:
- return b''
- if size is None:
- size = self.remaining
- else:
- size = min(size, self.remaining)
-
- data = self.rfile.read(size)
- self.remaining -= len(data)
- return data
-
- def readline(self, size=None):
- if self.remaining == 0:
- return b''
- if size is None:
- size = self.remaining
- else:
- size = min(size, self.remaining)
-
- data = self.rfile.readline(size)
- self.remaining -= len(data)
- return data
-
- def readlines(self, sizehint=0):
- # Shamelessly stolen from StringIO
- total = 0
- lines = []
-        # Don't pass sizehint through to readline: with the default of 0
-        # it would be capped to zero bytes and no lines would be returned.
-        line = self.readline()
- while line:
- lines.append(line)
- total += len(line)
- if 0 < sizehint <= total:
- break
-            line = self.readline()
- return lines
-
- def close(self):
- self.rfile.close()
-
- def __iter__(self):
- return self
-
- def __next__(self):
- data = next(self.rfile)
- self.remaining -= len(data)
- return data
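-
-# Sketch: reads stop at the declared Content-Length, leaving any pipelined
-# bytes that follow on the socket untouched:
-#
-#     >>> from io import BytesIO
-#     >>> f = KnownLengthRFile(BytesIO(b"abcdefgh"), content_length=4)
-#     >>> f.read()
-#     b'abcd'
-#     >>> f.read()
-#     b''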
-
-
-class ChunkedRFile(object):
- """Wraps a file-like object, returning an empty string when exhausted.
-
- This class is intended to provide a conforming wsgi.input value for
- request entities that have been encoded with the 'chunked' transfer
- encoding.
- """
-
- def __init__(self, rfile, maxlen, bufsize=8192):
- self.rfile = rfile
- self.maxlen = maxlen
- self.bytes_read = 0
- self.buffer = EMPTY
- self.bufsize = bufsize
- self.closed = False
-
- def _fetch(self):
- if self.closed:
- return
-
- line = self.rfile.readline()
- self.bytes_read += len(line)
-
- if self.maxlen and self.bytes_read > self.maxlen:
- raise MaxSizeExceeded("Request Entity Too Large", self.maxlen)
-
- line = line.strip().split(SEMICOLON, 1)
-
- try:
- chunk_size = line.pop(0)
- chunk_size = int(chunk_size, 16)
- except ValueError:
- raise ValueError("Bad chunked transfer size: " + repr(chunk_size))
-
- if chunk_size <= 0:
- self.closed = True
- return
-
-## if line: chunk_extension = line[0]
-
- if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
- raise IOError("Request Entity Too Large")
-
- chunk = self.rfile.read(chunk_size)
- self.bytes_read += len(chunk)
- self.buffer += chunk
-
- crlf = self.rfile.read(2)
- if crlf != CRLF:
- raise ValueError(
- "Bad chunked transfer coding (expected '\\r\\n', "
- "got " + repr(crlf) + ")")
-
- def read(self, size=None):
- data = EMPTY
- while True:
- if size and len(data) >= size:
- return data
-
- if not self.buffer:
- self._fetch()
- if not self.buffer:
- # EOF
- return data
-
- if size:
- remaining = size - len(data)
- data += self.buffer[:remaining]
- self.buffer = self.buffer[remaining:]
-            else:
-                data += self.buffer
-                # Clear the buffer, or this loop would never terminate.
-                self.buffer = EMPTY
-
-    def readline(self, size=None):
-        data = EMPTY
-        while True:
-            if size and len(data) >= size:
-                return data
-
-            if not self.buffer:
-                self._fetch()
-                if not self.buffer:
-                    # EOF
-                    return data
-
-            newline_pos = self.buffer.find(LF)
-            if newline_pos == -1:
-                # No newline yet: consume what we have and keep fetching.
-                if size:
-                    remaining = size - len(data)
-                    data += self.buffer[:remaining]
-                    self.buffer = self.buffer[remaining:]
-                else:
-                    data += self.buffer
-                    self.buffer = EMPTY
-            else:
-                # Consume through the LF (bounded by size) and return, so
-                # each call yields at most one line. Without the +1 the LF
-                # would be found again at position 0 and we'd loop forever.
-                take = newline_pos + 1
-                if size:
-                    take = min(take, size - len(data))
-                data += self.buffer[:take]
-                self.buffer = self.buffer[take:]
-                return data
-
- def readlines(self, sizehint=0):
- # Shamelessly stolen from StringIO
- total = 0
- lines = []
- line = self.readline(sizehint)
- while line:
- lines.append(line)
- total += len(line)
- if 0 < sizehint <= total:
- break
- line = self.readline(sizehint)
- return lines
-
- def read_trailer_lines(self):
- if not self.closed:
- raise ValueError(
- "Cannot read trailers until the request body has been read.")
-
- while True:
- line = self.rfile.readline()
- if not line:
- # No more data--illegal end of headers
- raise ValueError("Illegal end of headers.")
-
- self.bytes_read += len(line)
- if self.maxlen and self.bytes_read > self.maxlen:
- raise IOError("Request Entity Too Large")
-
- if line == CRLF:
- # Normal end of headers
- break
- if not line.endswith(CRLF):
- raise ValueError("HTTP requires CRLF terminators")
-
- yield line
-
- def close(self):
- self.rfile.close()
-
-    def __iter__(self):
-        # Yield complete lines until the stream is exhausted.
-        line = self.readline()
-        while line:
-            yield line
-            line = self.readline()
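-
-# Sketch of the chunked framing this class decodes: hex size line, CRLF,
-# payload, CRLF, terminated by a zero-size chunk (maxlen=0 disables the
-# size check):
-#
-#     >>> from io import BytesIO
-#     >>> body = BytesIO(b"5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n")
-#     >>> ChunkedRFile(body, maxlen=0).read()
-#     b'hello world'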
-
-
-class HTTPRequest(object):
- """An HTTP Request (and response).
-
- A single HTTP connection may consist of multiple request/response pairs.
- """
-
- server = None
- """The HTTPServer object which is receiving this request."""
-
- conn = None
- """The HTTPConnection object on which this request connected."""
-
- inheaders = {}
- """A dict of request headers."""
-
- outheaders = []
- """A list of header tuples to write in the response."""
-
- ready = False
- """When True, the request has been parsed and is ready to begin generating
- the response. When False, signals the calling Connection that the response
- should not be generated and the connection should close."""
-
- close_connection = False
- """Signals the calling Connection that the request should close. This does
- not imply an error! The client and/or server may each request that the
- connection be closed."""
-
- chunked_write = False
- """If True, output will be encoded with the "chunked" transfer-coding.
-
- This value is set automatically inside send_headers."""
-
- def __init__(self, server, conn):
-        self.server = server
- self.conn = conn
-
- self.ready = False
- self.started_request = False
- self.scheme = ntob("http")
- if self.server.ssl_adapter is not None:
- self.scheme = ntob("https")
- # Use the lowest-common protocol in case read_request_line errors.
- self.response_protocol = 'HTTP/1.0'
- self.inheaders = {}
-
- self.status = ""
- self.outheaders = []
- self.sent_headers = False
- self.close_connection = self.__class__.close_connection
- self.chunked_read = False
- self.chunked_write = self.__class__.chunked_write
-
- def parse_request(self):
- """Parse the next HTTP request start-line and message-headers."""
- self.rfile = SizeCheckWrapper(self.conn.rfile,
- self.server.max_request_header_size)
- try:
- success = self.read_request_line()
- except MaxSizeExceeded:
- self.simple_response("414 Request-URI Too Long",
- "The Request-URI sent with the request exceeds the maximum "
- "allowed bytes.")
- return
- else:
- if not success:
- return
-
- try:
- success = self.read_request_headers()
- except MaxSizeExceeded:
- self.simple_response("413 Request Entity Too Large",
- "The headers sent with the request exceed the maximum "
- "allowed bytes.")
- return
- else:
- if not success:
- return
-
- self.ready = True
-
- def read_request_line(self):
- # HTTP/1.1 connections are persistent by default. If a client
- # requests a page, then idles (leaves the connection open),
- # then rfile.readline() will raise socket.error("timed out").
- # Note that it does this based on the value given to settimeout(),
- # and doesn't need the client to request or acknowledge the close
- # (although your TCP stack might suffer for it: cf Apache's history
- # with FIN_WAIT_2).
- request_line = self.rfile.readline()
-
- # Set started_request to True so communicate() knows to send 408
- # from here on out.
- self.started_request = True
- if not request_line:
- return False
-
- if request_line == CRLF:
- # RFC 2616 sec 4.1: "...if the server is reading the protocol
- # stream at the beginning of a message and receives a CRLF
- # first, it should ignore the CRLF."
- # But only ignore one leading line! else we enable a DoS.
- request_line = self.rfile.readline()
- if not request_line:
- return False
-
- if not request_line.endswith(CRLF):
- self.simple_response("400 Bad Request", "HTTP requires CRLF terminators")
- return False
-
- try:
- method, uri, req_protocol = request_line.strip().split(SPACE, 2)
- # The [x:y] slicing is necessary for byte strings to avoid getting ord's
- rp = int(req_protocol[5:6]), int(req_protocol[7:8])
- except ValueError:
- self.simple_response("400 Bad Request", "Malformed Request-Line")
- return False
-
- self.uri = uri
- self.method = method
-
- # uri may be an abs_path (including "http://host.domain.tld");
- scheme, authority, path = self.parse_request_uri(uri)
- if NUMBER_SIGN in path:
- self.simple_response("400 Bad Request",
- "Illegal #fragment in Request-URI.")
- return False
-
- if scheme:
- self.scheme = scheme
-
- qs = EMPTY
- if QUESTION_MARK in path:
- path, qs = path.split(QUESTION_MARK, 1)
-
- # Unquote the path+params (e.g. "/this%20path" -> "/this path").
- # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
- #
- # But note that "...a URI must be separated into its components
- # before the escaped characters within those components can be
- # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
- # Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
- try:
- atoms = [self.unquote_bytes(x) for x in quoted_slash.split(path)]
- except ValueError:
- ex = sys.exc_info()[1]
- self.simple_response("400 Bad Request", ex.args[0])
- return False
- path = b"%2F".join(atoms)
- self.path = path
-
- # Note that, like wsgiref and most other HTTP servers,
- # we "% HEX HEX"-unquote the path but not the query string.
- self.qs = qs
-
- # Compare request and server HTTP protocol versions, in case our
- # server does not support the requested protocol. Limit our output
- # to min(req, server). We want the following output:
- # request server actual written supported response
- # protocol protocol response protocol feature set
- # a 1.0 1.0 1.0 1.0
- # b 1.0 1.1 1.1 1.0
- # c 1.1 1.0 1.0 1.0
- # d 1.1 1.1 1.1 1.1
- # Notice that, in (b), the response will be "HTTP/1.1" even though
- # the client only understands 1.0. RFC 2616 10.5.6 says we should
- # only return 505 if the _major_ version is different.
- # The [x:y] slicing is necessary for byte strings to avoid getting ord's
- sp = int(self.server.protocol[5:6]), int(self.server.protocol[7:8])
-
- if sp[0] != rp[0]:
- self.simple_response("505 HTTP Version Not Supported")
- return False
-
- self.request_protocol = req_protocol
- self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
- return True
-
- def read_request_headers(self):
- """Read self.rfile into self.inheaders. Return success."""
-
- # then all the http headers
- try:
- read_headers(self.rfile, self.inheaders)
- except ValueError:
- ex = sys.exc_info()[1]
- self.simple_response("400 Bad Request", ex.args[0])
- return False
-
- mrbs = self.server.max_request_body_size
- if mrbs and int(self.inheaders.get(b"Content-Length", 0)) > mrbs:
- self.simple_response("413 Request Entity Too Large",
- "The entity sent with the request exceeds the maximum "
- "allowed bytes.")
- return False
-
- # Persistent connection support
- if self.response_protocol == "HTTP/1.1":
- # Both server and client are HTTP/1.1
- if self.inheaders.get(b"Connection", b"") == b"close":
- self.close_connection = True
- else:
- # Either the server or client (or both) are HTTP/1.0
- if self.inheaders.get(b"Connection", b"") != b"Keep-Alive":
- self.close_connection = True
-
- # Transfer-Encoding support
- te = None
- if self.response_protocol == "HTTP/1.1":
- te = self.inheaders.get(b"Transfer-Encoding")
- if te:
- te = [x.strip().lower() for x in te.split(b",") if x.strip()]
-
- self.chunked_read = False
-
- if te:
- for enc in te:
- if enc == b"chunked":
- self.chunked_read = True
- else:
- # Note that, even if we see "chunked", we must reject
- # if there is an extension we don't recognize.
- self.simple_response("501 Unimplemented")
- self.close_connection = True
- return False
-
- # From PEP 333:
- # "Servers and gateways that implement HTTP 1.1 must provide
- # transparent support for HTTP 1.1's "expect/continue" mechanism.
- # This may be done in any of several ways:
- # 1. Respond to requests containing an Expect: 100-continue request
- # with an immediate "100 Continue" response, and proceed normally.
- # 2. Proceed with the request normally, but provide the application
- # with a wsgi.input stream that will send the "100 Continue"
- # response if/when the application first attempts to read from
- # the input stream. The read request must then remain blocked
- # until the client responds.
- # 3. Wait until the client decides that the server does not support
- # expect/continue, and sends the request body on its own.
- # (This is suboptimal, and is not recommended.)
- #
- # We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
- # but it seems like it would be a big slowdown for such a rare case.
- if self.inheaders.get(b"Expect", b"") == b"100-continue":
- # Don't use simple_response here, because it emits headers
- # we don't want. See http://www.cherrypy.org/ticket/951
- msg = self.server.protocol.encode('ascii') + b" 100 Continue\r\n\r\n"
- try:
- self.conn.wfile.write(msg)
- except socket.error:
- x = sys.exc_info()[1]
- if x.args[0] not in socket_errors_to_ignore:
- raise
- return True
-
- def parse_request_uri(self, uri):
- """Parse a Request-URI into (scheme, authority, path).
-
- Note that Request-URI's must be one of::
-
- Request-URI = "*" | absoluteURI | abs_path | authority
-
- Therefore, a Request-URI which starts with a double forward-slash
- cannot be a "net_path"::
-
- net_path = "//" authority [ abs_path ]
-
- Instead, it must be interpreted as an "abs_path" with an empty first
- path segment::
-
- abs_path = "/" path_segments
- path_segments = segment *( "/" segment )
- segment = *pchar *( ";" param )
- param = *pchar
- """
- if uri == ASTERISK:
- return None, None, uri
-
- scheme, sep, remainder = uri.partition(b'://')
- if sep and QUESTION_MARK not in scheme:
- # An absoluteURI.
- # If there's a scheme (and it must be http or https), then:
- # http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]]
- authority, path_a, path_b = remainder.partition(FORWARD_SLASH)
- return scheme.lower(), authority, path_a+path_b
-
- if uri.startswith(FORWARD_SLASH):
- # An abs_path.
- return None, None, uri
- else:
- # An authority.
- return None, uri, None
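-
-    # Sketch of the three shapes parse_request_uri returns:
-    #
-    #     parse_request_uri(b'*')                        -> (None, None, b'*')
-    #     parse_request_uri(b'http://example.com/a?q=1') -> (b'http', b'example.com', b'/a?q=1')
-    #     parse_request_uri(b'/a/b')                     -> (None, None, b'/a/b')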
-
-    def unquote_bytes(self, path):
-        """Take a %-quoted bytestring and unquote the encoded values."""
-        res = path.split(b'%')
-
-        for i in range(1, len(res)):
-            item = res[i]
-            try:
-                res[i] = bytes([int(item[:2], 16)]) + item[2:]
-            except ValueError:
-                raise ValueError("invalid percent-encoding: %r" % item)
-        return b''.join(res)
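-
-    # Sketch: unquote_bytes(b'/this%20path') -> b'/this path'. Encoded
-    # slashes survive, because read_request_line splits on %2F before
-    # unquoting and rejoins the atoms with a literal b"%2F".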
-
- def respond(self):
- """Call the gateway and write its iterable output."""
- mrbs = self.server.max_request_body_size
- if self.chunked_read:
- self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
- else:
- cl = int(self.inheaders.get(b"Content-Length", 0))
- if mrbs and mrbs < cl:
- if not self.sent_headers:
- self.simple_response("413 Request Entity Too Large",
- "The entity sent with the request exceeds the maximum "
- "allowed bytes.")
- return
- self.rfile = KnownLengthRFile(self.conn.rfile, cl)
-
- self.server.gateway(self).respond()
-
- if (self.ready and not self.sent_headers):
- self.sent_headers = True
- self.send_headers()
- if self.chunked_write:
- self.conn.wfile.write(b"0\r\n\r\n")
-
- def simple_response(self, status, msg=""):
- """Write a simple response back to the client."""
- status = str(status)
- buf = [bytes(self.server.protocol, "ascii") + SPACE +
- bytes(status, "ISO-8859-1") + CRLF,
- bytes("Content-Length: %s\r\n" % len(msg), "ISO-8859-1"),
- b"Content-Type: text/plain\r\n"]
-
- if status[:3] in ("413", "414"):
- # Request Entity Too Large / Request-URI Too Long
- self.close_connection = True
- if self.response_protocol == 'HTTP/1.1':
- # This will not be true for 414, since read_request_line
- # usually raises 414 before reading the whole line, and we
- # therefore cannot know the proper response_protocol.
- buf.append(b"Connection: close\r\n")
- else:
- # HTTP/1.0 had no 413/414 status nor Connection header.
- # Emit 400 instead and trust the message body is enough.
- status = "400 Bad Request"
-
- buf.append(CRLF)
- if msg:
- if isinstance(msg, unicodestr):
- msg = msg.encode("ISO-8859-1")
- buf.append(msg)
-
- try:
- self.conn.wfile.write(b"".join(buf))
- except socket.error:
- x = sys.exc_info()[1]
- if x.args[0] not in socket_errors_to_ignore:
- raise
-
- def write(self, chunk):
- """Write unbuffered data to the client."""
- if self.chunked_write and chunk:
- buf = [bytes(hex(len(chunk)), 'ASCII')[2:], CRLF, chunk, CRLF]
- self.conn.wfile.write(EMPTY.join(buf))
- else:
- self.conn.wfile.write(chunk)
-
- def send_headers(self):
- """Assert, process, and send the HTTP response message-headers.
-
- You must set self.status, and self.outheaders before calling this.
- """
- hkeys = [key.lower() for key, value in self.outheaders]
- status = int(self.status[:3])
-
- if status == 413:
- # Request Entity Too Large. Close conn to avoid garbage.
- self.close_connection = True
- elif b"content-length" not in hkeys:
- # "All 1xx (informational), 204 (no content),
- # and 304 (not modified) responses MUST NOT
- # include a message-body." So no point chunking.
- if status < 200 or status in (204, 205, 304):
- pass
- else:
- if (self.response_protocol == 'HTTP/1.1'
- and self.method != b'HEAD'):
- # Use the chunked transfer-coding
- self.chunked_write = True
- self.outheaders.append((b"Transfer-Encoding", b"chunked"))
- else:
- # Closing the conn is the only way to determine len.
- self.close_connection = True
-
- if b"connection" not in hkeys:
- if self.response_protocol == 'HTTP/1.1':
- # Both server and client are HTTP/1.1 or better
- if self.close_connection:
- self.outheaders.append((b"Connection", b"close"))
- else:
- # Server and/or client are HTTP/1.0
- if not self.close_connection:
- self.outheaders.append((b"Connection", b"Keep-Alive"))
-
- if (not self.close_connection) and (not self.chunked_read):
- # Read any remaining request body data on the socket.
- # "If an origin server receives a request that does not include an
- # Expect request-header field with the "100-continue" expectation,
- # the request includes a request body, and the server responds
- # with a final status code before reading the entire request body
- # from the transport connection, then the server SHOULD NOT close
- # the transport connection until it has read the entire request,
- # or until the client closes the connection. Otherwise, the client
- # might not reliably receive the response message. However, this
-            # requirement is not [to] be construed as preventing a server from
- # defending itself against denial-of-service attacks, or from
- # badly broken client implementations."
- remaining = getattr(self.rfile, 'remaining', 0)
- if remaining > 0:
- self.rfile.read(remaining)
-
- if b"date" not in hkeys:
- self.outheaders.append(
- (b"Date", email.utils.formatdate(usegmt=True).encode('ISO-8859-1')))
-
- if b"server" not in hkeys:
- self.outheaders.append(
- (b"Server", self.server.server_name.encode('ISO-8859-1')))
-
- buf = [self.server.protocol.encode('ascii') + SPACE + self.status + CRLF]
- for k, v in self.outheaders:
- buf.append(k + COLON + SPACE + v + CRLF)
- buf.append(CRLF)
- self.conn.wfile.write(EMPTY.join(buf))
-
-
-class NoSSLError(Exception):
- """Exception raised when a client speaks HTTP to an HTTPS socket."""
- pass
-
-
-class FatalSSLAlert(Exception):
- """Exception raised when the SSL implementation signals a fatal alert."""
- pass
-
-
-class CP_BufferedWriter(io.BufferedWriter):
- """Faux file object attached to a socket object."""
-
- def write(self, b):
- self._checkClosed()
- if isinstance(b, str):
- raise TypeError("can't write str to binary stream")
-
- with self._write_lock:
- self._write_buf.extend(b)
- self._flush_unlocked()
- return len(b)
-
- def _flush_unlocked(self):
- self._checkClosed("flush of closed file")
- while self._write_buf:
- try:
-                    # SSL sockets only accept 'bytes', not bytearrays,
- # so perhaps we should conditionally wrap this for perf?
- n = self.raw.write(bytes(self._write_buf))
- except io.BlockingIOError as e:
- n = e.characters_written
- del self._write_buf[:n]
-
-
-def CP_makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
- if 'r' in mode:
- return io.BufferedReader(socket.SocketIO(sock, mode), bufsize)
- else:
- return CP_BufferedWriter(socket.SocketIO(sock, mode), bufsize)
-
-class HTTPConnection(object):
- """An HTTP connection (active socket).
-
- server: the Server object which received this connection.
- socket: the raw socket object (usually TCP) for this connection.
- makefile: a fileobject class for reading from the socket.
- """
-
- remote_addr = None
- remote_port = None
- ssl_env = None
- rbufsize = DEFAULT_BUFFER_SIZE
- wbufsize = DEFAULT_BUFFER_SIZE
- RequestHandlerClass = HTTPRequest
-
- def __init__(self, server, sock, makefile=CP_makefile):
- self.server = server
- self.socket = sock
- self.rfile = makefile(sock, "rb", self.rbufsize)
- self.wfile = makefile(sock, "wb", self.wbufsize)
- self.requests_seen = 0
-
- def communicate(self):
- """Read each request and respond appropriately."""
- request_seen = False
- try:
- while True:
- # (re)set req to None so that if something goes wrong in
- # the RequestHandlerClass constructor, the error doesn't
- # get written to the previous request.
- req = None
- req = self.RequestHandlerClass(self.server, self)
-
- # This order of operations should guarantee correct pipelining.
- req.parse_request()
- if self.server.stats['Enabled']:
- self.requests_seen += 1
- if not req.ready:
- # Something went wrong in the parsing (and the server has
- # probably already made a simple_response). Return and
- # let the conn close.
- return
-
- request_seen = True
- req.respond()
- if req.close_connection:
- return
- except socket.error:
- e = sys.exc_info()[1]
- errnum = e.args[0]
- # sadly SSL sockets return a different (longer) time out string
- if errnum == 'timed out' or errnum == 'The read operation timed out':
- # Don't error if we're between requests; only error
- # if 1) no request has been started at all, or 2) we're
- # in the middle of a request.
- # See http://www.cherrypy.org/ticket/853
- if (not request_seen) or (req and req.started_request):
- # Don't bother writing the 408 if the response
- # has already started being written.
- if req and not req.sent_headers:
- try:
- req.simple_response("408 Request Timeout")
- except FatalSSLAlert:
- # Close the connection.
- return
- elif errnum not in socket_errors_to_ignore:
- self.server.error_log("socket.error %s" % repr(errnum),
- level=logging.WARNING, traceback=True)
- if req and not req.sent_headers:
- try:
- req.simple_response("500 Internal Server Error")
- except FatalSSLAlert:
- # Close the connection.
- return
- return
- except (KeyboardInterrupt, SystemExit):
- raise
- except FatalSSLAlert:
- # Close the connection.
- return
- except NoSSLError:
- if req and not req.sent_headers:
- # Unwrap our wfile
- self.wfile = CP_makefile(self.socket._sock, "wb", self.wbufsize)
- req.simple_response("400 Bad Request",
- "The client sent a plain HTTP request, but "
- "this server only speaks HTTPS on this port.")
- self.linger = True
- except Exception:
- e = sys.exc_info()[1]
- self.server.error_log(repr(e), level=logging.ERROR, traceback=True)
- if req and not req.sent_headers:
- try:
- req.simple_response("500 Internal Server Error")
- except FatalSSLAlert:
- # Close the connection.
- return
-
- linger = False
-
- def close(self):
- """Close the socket underlying this connection."""
- self.rfile.close()
-
- if not self.linger:
- # Python's socket module does NOT call close on the kernel socket
- # when you call socket.close(). We do so manually here because we
- # want this server to send a FIN TCP segment immediately. Note this
- # must be called *before* calling socket.close(), because the latter
- # drops its reference to the kernel socket.
- # Python 3 *probably* fixed this with socket._real_close; hard to tell.
-## self.socket._sock.close()
- self.socket.close()
- else:
- # On the other hand, sometimes we want to hang around for a bit
- # to make sure the client has a chance to read our entire
- # response. Skipping the close() calls here delays the FIN
- # packet until the socket object is garbage-collected later.
- # Someday, perhaps, we'll do the full lingering_close that
- # Apache does, but not today.
- pass
-
-
-class TrueyZero(object):
- """An object which equals and does math like the integer '0' but evals True."""
- def __add__(self, other):
- return other
- def __radd__(self, other):
- return other
-trueyzero = TrueyZero()
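-
-# Sketch: trueyzero + 5 == 5 and 5 + trueyzero == 5, yet bool(trueyzero)
-# is True, so the 'and ... or ...' stats lambdas below can pick it as a
-# live value while still adding it like zero.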
-
-
-_SHUTDOWNREQUEST = None
-
-class WorkerThread(threading.Thread):
- """Thread which continuously polls a Queue for Connection objects.
-
- Due to the timing issues of polling a Queue, a WorkerThread does not
- check its own 'ready' flag after it has started. To stop the thread,
- it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
- (one for each running WorkerThread).
- """
-
- conn = None
- """The current connection pulled off the Queue, or None."""
-
- server = None
- """The HTTP Server which spawned this thread, and which owns the
- Queue and is placing active connections into it."""
-
- ready = False
- """A simple flag for the calling server to know when this thread
- has begun polling the Queue."""
-
-
- def __init__(self, server):
- self.ready = False
- self.server = server
-
- self.requests_seen = 0
- self.bytes_read = 0
- self.bytes_written = 0
- self.start_time = None
- self.work_time = 0
- self.stats = {
- 'Requests': lambda s: self.requests_seen + ((self.start_time is None) and trueyzero or self.conn.requests_seen),
- 'Bytes Read': lambda s: self.bytes_read + ((self.start_time is None) and trueyzero or self.conn.rfile.bytes_read),
- 'Bytes Written': lambda s: self.bytes_written + ((self.start_time is None) and trueyzero or self.conn.wfile.bytes_written),
- 'Work Time': lambda s: self.work_time + ((self.start_time is None) and trueyzero or time.time() - self.start_time),
- 'Read Throughput': lambda s: s['Bytes Read'](s) / (s['Work Time'](s) or 1e-6),
- 'Write Throughput': lambda s: s['Bytes Written'](s) / (s['Work Time'](s) or 1e-6),
- }
- threading.Thread.__init__(self)
-
- def run(self):
- self.server.stats['Worker Threads'][self.getName()] = self.stats
- try:
- self.ready = True
- while True:
- conn = self.server.requests.get()
- if conn is _SHUTDOWNREQUEST:
- return
-
- self.conn = conn
- if self.server.stats['Enabled']:
- self.start_time = time.time()
- try:
- conn.communicate()
- finally:
- conn.close()
- if self.server.stats['Enabled']:
- self.requests_seen += self.conn.requests_seen
- self.bytes_read += self.conn.rfile.bytes_read
- self.bytes_written += self.conn.wfile.bytes_written
- self.work_time += time.time() - self.start_time
- self.start_time = None
- self.conn = None
- except (KeyboardInterrupt, SystemExit):
- exc = sys.exc_info()[1]
- self.server.interrupt = exc
-
-
-class ThreadPool(object):
- """A Request Queue for an HTTPServer which pools threads.
-
- ThreadPool objects must provide min, get(), put(obj), start()
- and stop(timeout) attributes.
- """
-
- def __init__(self, server, min=10, max=-1):
- self.server = server
- self.min = min
- self.max = max
- self._threads = []
- self._queue = queue.Queue()
- self.get = self._queue.get
-
- def start(self):
- """Start the pool of threads."""
- for i in range(self.min):
- self._threads.append(WorkerThread(self.server))
- for worker in self._threads:
- worker.setName("CP Server " + worker.getName())
- worker.start()
- for worker in self._threads:
- while not worker.ready:
- time.sleep(.1)
-
- def _get_idle(self):
- """Number of worker threads which are idle. Read-only."""
- return len([t for t in self._threads if t.conn is None])
- idle = property(_get_idle, doc=_get_idle.__doc__)
-
- def put(self, obj):
- self._queue.put(obj)
- if obj is _SHUTDOWNREQUEST:
- return
-
- def grow(self, amount):
- """Spawn new worker threads (not above self.max)."""
- for i in range(amount):
- if self.max > 0 and len(self._threads) >= self.max:
- break
- worker = WorkerThread(self.server)
- worker.setName("CP Server " + worker.getName())
- self._threads.append(worker)
- worker.start()
-
- def shrink(self, amount):
- """Kill off worker threads (not below self.min)."""
- # Grow/shrink the pool if necessary.
- # Remove any dead threads from our list
- for t in self._threads:
- if not t.isAlive():
- self._threads.remove(t)
- amount -= 1
-
-        if amount > 0:
-            # Put one shutdown request on the queue per thread we want to
-            # retire, but never drop the pool below self.min. Each request
-            # is consumed by exactly one worker, which then terminates;
-            # its dead thread is removed from our list on a later call.
-            for i in range(min(amount, len(self._threads) - self.min)):
-                self._queue.put(_SHUTDOWNREQUEST)
-
- def stop(self, timeout=5):
- # Must shut down threads here so the code that calls
- # this method can know when all threads are stopped.
- for worker in self._threads:
- self._queue.put(_SHUTDOWNREQUEST)
-
- # Don't join currentThread (when stop is called inside a request).
- current = threading.currentThread()
- if timeout and timeout >= 0:
- endtime = time.time() + timeout
- while self._threads:
- worker = self._threads.pop()
- if worker is not current and worker.isAlive():
- try:
- if timeout is None or timeout < 0:
- worker.join()
- else:
- remaining_time = endtime - time.time()
- if remaining_time > 0:
- worker.join(remaining_time)
- if worker.isAlive():
- # We exhausted the timeout.
- # Forcibly shut down the socket.
- c = worker.conn
- if c and not c.rfile.closed:
- try:
- c.socket.shutdown(socket.SHUT_RD)
- except TypeError:
- # pyOpenSSL sockets don't take an arg
- c.socket.shutdown()
- worker.join()
- except (AssertionError,
- # Ignore repeated Ctrl-C.
- # See http://www.cherrypy.org/ticket/691.
- KeyboardInterrupt):
- pass
-
- def _get_qsize(self):
- return self._queue.qsize()
- qsize = property(_get_qsize)
-
-
-
-try:
- import fcntl
-except ImportError:
- try:
- from ctypes import windll, WinError
- except ImportError:
- def prevent_socket_inheritance(sock):
- """Dummy function, since neither fcntl nor ctypes are available."""
- pass
- else:
- def prevent_socket_inheritance(sock):
- """Mark the given socket fd as non-inheritable (Windows)."""
- if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
- raise WinError()
-else:
- def prevent_socket_inheritance(sock):
- """Mark the given socket fd as non-inheritable (POSIX)."""
- fd = sock.fileno()
- old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
- fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
-
-
-class SSLAdapter(object):
- """Base class for SSL driver library adapters.
-
- Required methods:
-
- * ``wrap(sock) -> (wrapped socket, ssl environ dict)``
- * ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) -> socket file object``
- """
-
- def __init__(self, certificate, private_key, certificate_chain=None):
- self.certificate = certificate
- self.private_key = private_key
- self.certificate_chain = certificate_chain
-
- def wrap(self, sock):
-        raise NotImplementedError
-
- def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
-        raise NotImplementedError
-
-
-class HTTPServer(object):
- """An HTTP server."""
-
- _bind_addr = "127.0.0.1"
- _interrupt = None
-
- gateway = None
- """A Gateway instance."""
-
- minthreads = None
- """The minimum number of worker threads to create (default 10)."""
-
- maxthreads = None
- """The maximum number of worker threads to create (default -1 = no limit)."""
-
- server_name = None
- """The name of the server; defaults to socket.gethostname()."""
-
- protocol = "HTTP/1.1"
- """The version string to write in the Status-Line of all HTTP responses.
-
- For example, "HTTP/1.1" is the default. This also limits the supported
- features used in the response."""
-
- request_queue_size = 5
- """The 'backlog' arg to socket.listen(); max queued connections (default 5)."""
-
- shutdown_timeout = 5
- """The total time, in seconds, to wait for worker threads to cleanly exit."""
-
- timeout = 10
- """The timeout in seconds for accepted connections (default 10)."""
-
- version = "CherryPy/3.2.2"
- """A version string for the HTTPServer."""
-
- software = None
- """The value to set for the SERVER_SOFTWARE entry in the WSGI environ.
-
- If None, this defaults to ``'%s Server' % self.version``."""
-
- ready = False
- """An internal flag which marks whether the socket is accepting connections."""
-
- max_request_header_size = 0
- """The maximum size, in bytes, for request headers, or 0 for no limit."""
-
- max_request_body_size = 0
- """The maximum size, in bytes, for request bodies, or 0 for no limit."""
-
- nodelay = True
- """If True (the default since 3.1), sets the TCP_NODELAY socket option."""
-
- ConnectionClass = HTTPConnection
- """The class to use for handling HTTP connections."""
-
- ssl_adapter = None
- """An instance of SSLAdapter (or a subclass).
-
- You must have the corresponding SSL driver library installed."""
-
- def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1,
- server_name=None):
- self.bind_addr = bind_addr
- self.gateway = gateway
-
- self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads)
-
- if not server_name:
- server_name = socket.gethostname()
- self.server_name = server_name
- self.clear_stats()
-
- def clear_stats(self):
- self._start_time = None
- self._run_time = 0
- self.stats = {
- 'Enabled': False,
- 'Bind Address': lambda s: repr(self.bind_addr),
- 'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(),
- 'Accepts': 0,
- 'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
- 'Queue': lambda s: getattr(self.requests, "qsize", None),
- 'Threads': lambda s: len(getattr(self.requests, "_threads", [])),
- 'Threads Idle': lambda s: getattr(self.requests, "idle", None),
- 'Socket Errors': 0,
- 'Requests': lambda s: (not s['Enabled']) and -1 or sum([w['Requests'](w) for w
- in s['Worker Threads'].values()], 0),
- 'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum([w['Bytes Read'](w) for w
- in s['Worker Threads'].values()], 0),
- 'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum([w['Bytes Written'](w) for w
- in s['Worker Threads'].values()], 0),
- 'Work Time': lambda s: (not s['Enabled']) and -1 or sum([w['Work Time'](w) for w
- in s['Worker Threads'].values()], 0),
- 'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum(
- [w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
- for w in s['Worker Threads'].values()], 0),
- 'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum(
- [w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
- for w in s['Worker Threads'].values()], 0),
- 'Worker Threads': {},
- }
- logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats
-
- def runtime(self):
- if self._start_time is None:
- return self._run_time
- else:
- return self._run_time + (time.time() - self._start_time)
-
- def __str__(self):
- return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
- self.bind_addr)
-
- def _get_bind_addr(self):
- return self._bind_addr
- def _set_bind_addr(self, value):
- if isinstance(value, tuple) and value[0] in ('', None):
- # Despite the socket module docs, using '' does not
- # allow AI_PASSIVE to work. Passing None instead
- # returns '0.0.0.0' like we want. In other words:
- # host AI_PASSIVE result
- # '' Y 192.168.x.y
- # '' N 192.168.x.y
- # None Y 0.0.0.0
- # None N 127.0.0.1
- # But since you can get the same effect with an explicit
- # '0.0.0.0', we deny both the empty string and None as values.
- raise ValueError("Host values of '' or None are not allowed. "
- "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
- "to listen on all active interfaces.")
- self._bind_addr = value
- bind_addr = property(_get_bind_addr, _set_bind_addr,
- doc="""The interface on which to listen for connections.
-
- For TCP sockets, a (host, port) tuple. Host values may be any IPv4
- or IPv6 address, or any valid hostname. The string 'localhost' is a
- synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
- The string '0.0.0.0' is a special IPv4 entry meaning "any active
- interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
- IPv6. The empty string or None are not allowed.
-
- For UNIX sockets, supply the filename as a string.""")
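-
-    # Sketch (with a hypothetical `gateway` object): a TCP server on all
-    # IPv4 interfaces vs. a UNIX-socket server:
-    #
-    #     HTTPServer(('0.0.0.0', 8080), gateway)
-    #     HTTPServer('/tmp/cherrypy.sock', gateway)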
-
- def start(self):
- """Run the server forever."""
- # We don't have to trap KeyboardInterrupt or SystemExit here,
-        # because cherrypy.server already does so, calling self.stop() for us.
- # If you're using this server with another framework, you should
- # trap those exceptions in whatever code block calls start().
- self._interrupt = None
-
- if self.software is None:
- self.software = "%s Server" % self.version
-
- # Select the appropriate socket
- if isinstance(self.bind_addr, basestring):
- # AF_UNIX socket
-
- # So we can reuse the socket...
-            try: os.unlink(self.bind_addr)
-            except OSError: pass
-
-            # So everyone can access the socket...
-            try: os.chmod(self.bind_addr, 511)  # 0o777
-            except OSError: pass
-
- info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
- else:
- # AF_INET or AF_INET6 socket
- # Get the correct address family for our host (allows IPv6 addresses)
- host, port = self.bind_addr
- try:
- info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
- socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
- except socket.gaierror:
- if ':' in self.bind_addr[0]:
- info = [(socket.AF_INET6, socket.SOCK_STREAM,
- 0, "", self.bind_addr + (0, 0))]
- else:
- info = [(socket.AF_INET, socket.SOCK_STREAM,
- 0, "", self.bind_addr)]
-
- self.socket = None
- msg = "No socket could be created"
- for res in info:
- af, socktype, proto, canonname, sa = res
- try:
- self.bind(af, socktype, proto)
- except socket.error:
- if self.socket:
- self.socket.close()
- self.socket = None
- continue
- break
- if not self.socket:
- raise socket.error(msg)
-
- # Timeout so KeyboardInterrupt can be caught on Win32
- self.socket.settimeout(1)
- self.socket.listen(self.request_queue_size)
-
- # Create worker threads
- self.requests.start()
-
- self.ready = True
- self._start_time = time.time()
- while self.ready:
- try:
- self.tick()
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- self.error_log("Error in HTTPServer.tick", level=logging.ERROR,
- traceback=True)
- if self.interrupt:
- while self.interrupt is True:
- # Wait for self.stop() to complete. See _set_interrupt.
- time.sleep(0.1)
- if self.interrupt:
- raise self.interrupt
-
- def error_log(self, msg="", level=20, traceback=False):
- # Override this in subclasses as desired
- sys.stderr.write(msg + '\n')
- sys.stderr.flush()
- if traceback:
- tblines = format_exc()
- sys.stderr.write(tblines)
- sys.stderr.flush()
-
- def bind(self, family, type, proto=0):
- """Create (or recreate) the actual socket object."""
- self.socket = socket.socket(family, type, proto)
- prevent_socket_inheritance(self.socket)
- self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- if self.nodelay and not isinstance(self.bind_addr, str):
- self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-
- if self.ssl_adapter is not None:
- self.socket = self.ssl_adapter.bind(self.socket)
-
- # If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
- # activate dual-stack. See http://www.cherrypy.org/ticket/871.
- if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6
- and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')):
- try:
- self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
- except (AttributeError, socket.error):
- # Apparently, the socket option is not available in
- # this machine's TCP stack
- pass
-
- self.socket.bind(self.bind_addr)
-
- def tick(self):
- """Accept a new connection and put it on the Queue."""
- try:
- s, addr = self.socket.accept()
- if self.stats['Enabled']:
- self.stats['Accepts'] += 1
- if not self.ready:
- return
-
- prevent_socket_inheritance(s)
- if hasattr(s, 'settimeout'):
- s.settimeout(self.timeout)
-
- makefile = CP_makefile
- ssl_env = {}
- # if ssl cert and key are set, we try to be a secure HTTP server
- if self.ssl_adapter is not None:
- try:
- s, ssl_env = self.ssl_adapter.wrap(s)
- except NoSSLError:
- msg = ("The client sent a plain HTTP request, but "
- "this server only speaks HTTPS on this port.")
- buf = ["%s 400 Bad Request\r\n" % self.protocol,
- "Content-Length: %s\r\n" % len(msg),
- "Content-Type: text/plain\r\n\r\n",
- msg]
-
- wfile = makefile(s, "wb", DEFAULT_BUFFER_SIZE)
- try:
- wfile.write("".join(buf).encode('ISO-8859-1'))
- except socket.error:
- x = sys.exc_info()[1]
- if x.args[0] not in socket_errors_to_ignore:
- raise
- return
- if not s:
- return
- makefile = self.ssl_adapter.makefile
- # Re-apply our timeout since we may have a new socket object
- if hasattr(s, 'settimeout'):
- s.settimeout(self.timeout)
-
- conn = self.ConnectionClass(self, s, makefile)
-
- if not isinstance(self.bind_addr, basestring):
- # optional values
- # Until we do DNS lookups, omit REMOTE_HOST
- if addr is None: # sometimes this can happen
- # figure out if AF_INET or AF_INET6.
- if len(s.getsockname()) == 2:
- # AF_INET
- addr = ('0.0.0.0', 0)
- else:
- # AF_INET6
- addr = ('::', 0)
- conn.remote_addr = addr[0]
- conn.remote_port = addr[1]
-
- conn.ssl_env = ssl_env
-
- self.requests.put(conn)
- except socket.timeout:
- # The only reason for the timeout in start() is so we can
- # notice keyboard interrupts on Win32, which don't interrupt
- # accept() by default
- return
- except socket.error:
- x = sys.exc_info()[1]
- if self.stats['Enabled']:
- self.stats['Socket Errors'] += 1
- if x.args[0] in socket_error_eintr:
- # I *think* this is right. EINTR should occur when a signal
- # is received during the accept() call; all docs say retry
- # the call, and I *think* I'm reading it right that Python
- # will then go ahead and poll for and handle the signal
- # elsewhere. See http://www.cherrypy.org/ticket/707.
- return
- if x.args[0] in socket_errors_nonblocking:
- # Just try again. See http://www.cherrypy.org/ticket/479.
- return
- if x.args[0] in socket_errors_to_ignore:
- # Our socket was closed.
- # See http://www.cherrypy.org/ticket/686.
- return
- raise
-
- def _get_interrupt(self):
- return self._interrupt
- def _set_interrupt(self, interrupt):
- self._interrupt = True
- self.stop()
- self._interrupt = interrupt
- interrupt = property(_get_interrupt, _set_interrupt,
- doc="Set this to an Exception instance to "
- "interrupt the server.")
-
- def stop(self):
- """Gracefully shutdown a server that is serving forever."""
- self.ready = False
- if self._start_time is not None:
- self._run_time += (time.time() - self._start_time)
- self._start_time = None
-
- sock = getattr(self, "socket", None)
- if sock:
- if not isinstance(self.bind_addr, basestring):
- # Touch our own socket to make accept() return immediately.
- try:
- host, port = sock.getsockname()[:2]
- except socket.error:
- x = sys.exc_info()[1]
- if x.args[0] not in socket_errors_to_ignore:
- # Changed to use error code and not message
- # See http://www.cherrypy.org/ticket/860.
- raise
- else:
-                # Note that we're explicitly NOT using AI_PASSIVE here,
-                # because we want an actual IP to touch.
- # localhost won't work if we've bound to a public IP,
- # but it will if we bound to '0.0.0.0' (INADDR_ANY).
- for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
- socket.SOCK_STREAM):
- af, socktype, proto, canonname, sa = res
- s = None
- try:
- s = socket.socket(af, socktype, proto)
- # See http://groups.google.com/group/cherrypy-users/
- # browse_frm/thread/bbfe5eb39c904fe0
- s.settimeout(1.0)
- s.connect((host, port))
- s.close()
- except socket.error:
- if s:
- s.close()
- if hasattr(sock, "close"):
- sock.close()
- self.socket = None
-
- self.requests.stop(self.shutdown_timeout)
-
-
-class Gateway(object):
- """A base class to interface HTTPServer with other systems, such as WSGI."""
-
- def __init__(self, req):
- self.req = req
-
- def respond(self):
- """Process the current request. Must be overridden in a subclass."""
-        raise NotImplementedError
-
-
-# These may either be wsgiserver.SSLAdapter subclasses or the string names
-# of such classes (in which case they will be lazily loaded).
-ssl_adapters = {
- 'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
- }
-
-def get_ssl_adapter_class(name='builtin'):
- """Return an SSL adapter class for the given name."""
- adapter = ssl_adapters[name.lower()]
- if isinstance(adapter, basestring):
- last_dot = adapter.rfind(".")
- attr_name = adapter[last_dot + 1:]
- mod_path = adapter[:last_dot]
-
- try:
- mod = sys.modules[mod_path]
- if mod is None:
- raise KeyError()
- except KeyError:
- # The last [''] is important.
- mod = __import__(mod_path, globals(), locals(), [''])
-
- # Let an AttributeError propagate outward.
- try:
- adapter = getattr(mod, attr_name)
- except AttributeError:
- raise AttributeError("'%s' object has no attribute '%s'"
- % (mod_path, attr_name))
-
- return adapter
-
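As a usage sketch (not part of this file): the lazy-loading contract above means a deployment can register a dotted path under a new key and only pay the import cost when the name is first resolved. The `'custom'` key, the `my_ssl` module, and `server` below are hypothetical.

```python
# Hypothetical registration: the class behind the dotted path is only
# imported when get_ssl_adapter_class() first resolves the name.
ssl_adapters['custom'] = 'my_ssl.adapters.MySSLAdapter'

adapter_cls = get_ssl_adapter_class('custom')  # triggers the lazy import
# SSLAdapter constructors take (certificate, private_key, ...):
server.ssl_adapter = adapter_cls('server.crt', 'server.key')
```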
-# -------------------------------- WSGI Stuff -------------------------------- #
-
-
-class CherryPyWSGIServer(HTTPServer):
- """A subclass of HTTPServer which calls a WSGI application."""
-
- wsgi_version = (1, 0)
- """The version of WSGI to produce."""
-
- def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
- max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
- self.requests = ThreadPool(self, min=numthreads or 1, max=max)
- self.wsgi_app = wsgi_app
- self.gateway = wsgi_gateways[self.wsgi_version]
-
- self.bind_addr = bind_addr
- if not server_name:
- server_name = socket.gethostname()
- self.server_name = server_name
- self.request_queue_size = request_queue_size
-
- self.timeout = timeout
- self.shutdown_timeout = shutdown_timeout
- self.clear_stats()
-
- def _get_numthreads(self):
- return self.requests.min
- def _set_numthreads(self, value):
- self.requests.min = value
- numthreads = property(_get_numthreads, _set_numthreads)
-
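A minimal start/stop sketch for this class (`hello_app` is an example application, not part of this module):

```python
def hello_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['Hello world!']

server = CherryPyWSGIServer(('0.0.0.0', 8070), hello_app,
                            server_name='www.cherrypy.example')
try:
    server.start()  # blocks; accepts connections until stopped
except KeyboardInterrupt:
    server.stop()
```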
-
-class WSGIGateway(Gateway):
- """A base class to interface HTTPServer with WSGI."""
-
- def __init__(self, req):
- self.req = req
- self.started_response = False
- self.env = self.get_environ()
- self.remaining_bytes_out = None
-
- def get_environ(self):
- """Return a new environ dict targeting the given wsgi.version"""
-        raise NotImplementedError
-
- def respond(self):
- """Process the current request."""
- response = self.req.server.wsgi_app(self.env, self.start_response)
- try:
- for chunk in response:
- # "The start_response callable must not actually transmit
- # the response headers. Instead, it must store them for the
- # server or gateway to transmit only after the first
- # iteration of the application return value that yields
- # a NON-EMPTY string, or upon the application's first
- # invocation of the write() callable." (PEP 333)
- if chunk:
- if isinstance(chunk, unicodestr):
- chunk = chunk.encode('ISO-8859-1')
- self.write(chunk)
- finally:
- if hasattr(response, "close"):
- response.close()
-
-    def start_response(self, status, headers, exc_info=None):
- """WSGI callable to begin the HTTP response."""
- # "The application may call start_response more than once,
- # if and only if the exc_info argument is provided."
- if self.started_response and not exc_info:
- raise AssertionError("WSGI start_response called a second "
- "time with no exc_info.")
- self.started_response = True
-
- # "if exc_info is provided, and the HTTP headers have already been
- # sent, start_response must raise an error, and should raise the
- # exc_info tuple."
- if self.req.sent_headers:
- try:
- raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
- finally:
- exc_info = None
-
- # According to PEP 3333, when using Python 3, the response status
- # and headers must be bytes masquerading as unicode; that is, they
- # must be of type "str" but are restricted to code points in the
- # "latin-1" set.
- if not isinstance(status, str):
- raise TypeError("WSGI response status is not of type str.")
- self.req.status = status.encode('ISO-8859-1')
-
- for k, v in headers:
- if not isinstance(k, str):
- raise TypeError("WSGI response header key %r is not of type str." % k)
- if not isinstance(v, str):
- raise TypeError("WSGI response header value %r is not of type str." % v)
- if k.lower() == 'content-length':
- self.remaining_bytes_out = int(v)
- self.req.outheaders.append((k.encode('ISO-8859-1'), v.encode('ISO-8859-1')))
-
- return self.write
-
- def write(self, chunk):
- """WSGI callable to write unbuffered data to the client.
-
- This method is also used internally by start_response (to write
- data from the iterable returned by the WSGI application).
- """
- if not self.started_response:
- raise AssertionError("WSGI write called before start_response.")
-
- chunklen = len(chunk)
- rbo = self.remaining_bytes_out
- if rbo is not None and chunklen > rbo:
- if not self.req.sent_headers:
- # Whew. We can send a 500 to the client.
- self.req.simple_response("500 Internal Server Error",
- "The requested resource returned more bytes than the "
- "declared Content-Length.")
- else:
- # Dang. We have probably already sent data. Truncate the chunk
- # to fit (so the client doesn't hang) and raise an error later.
- chunk = chunk[:rbo]
-
- if not self.req.sent_headers:
- self.req.sent_headers = True
- self.req.send_headers()
-
- self.req.write(chunk)
-
- if rbo is not None:
- rbo -= chunklen
- if rbo < 0:
- raise ValueError(
- "Response body exceeds the declared Content-Length.")
-
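The PEP 333 rule quoted in respond() is easiest to see from the application side; in this sketch the gateway holds the headers back across the empty chunk and transmits them with the first non-empty one:

```python
def streaming_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    yield ''             # empty chunk: headers must not be transmitted yet
    yield 'first bytes'  # first non-empty chunk: headers go out, then this data
```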
-
-class WSGIGateway_10(WSGIGateway):
- """A Gateway class to interface HTTPServer with WSGI 1.0.x."""
-
- def get_environ(self):
- """Return a new environ dict targeting the given wsgi.version"""
- req = self.req
- env = {
- # set a non-standard environ entry so the WSGI app can know what
- # the *real* server protocol is (and what features to support).
- # See http://www.faqs.org/rfcs/rfc2145.html.
- 'ACTUAL_SERVER_PROTOCOL': req.server.protocol,
- 'PATH_INFO': req.path.decode('ISO-8859-1'),
- 'QUERY_STRING': req.qs.decode('ISO-8859-1'),
- 'REMOTE_ADDR': req.conn.remote_addr or '',
- 'REMOTE_PORT': str(req.conn.remote_port or ''),
- 'REQUEST_METHOD': req.method.decode('ISO-8859-1'),
- 'REQUEST_URI': req.uri,
- 'SCRIPT_NAME': '',
- 'SERVER_NAME': req.server.server_name,
- # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
- 'SERVER_PROTOCOL': req.request_protocol.decode('ISO-8859-1'),
- 'SERVER_SOFTWARE': req.server.software,
- 'wsgi.errors': sys.stderr,
- 'wsgi.input': req.rfile,
- 'wsgi.multiprocess': False,
- 'wsgi.multithread': True,
- 'wsgi.run_once': False,
- 'wsgi.url_scheme': req.scheme.decode('ISO-8859-1'),
- 'wsgi.version': (1, 0),
- }
-
- if isinstance(req.server.bind_addr, basestring):
- # AF_UNIX. This isn't really allowed by WSGI, which doesn't
- # address unix domain sockets. But it's better than nothing.
- env["SERVER_PORT"] = ""
- else:
- env["SERVER_PORT"] = str(req.server.bind_addr[1])
-
- # Request headers
- for k, v in req.inheaders.items():
- k = k.decode('ISO-8859-1').upper().replace("-", "_")
- env["HTTP_" + k] = v.decode('ISO-8859-1')
-
- # CONTENT_TYPE/CONTENT_LENGTH
- ct = env.pop("HTTP_CONTENT_TYPE", None)
- if ct is not None:
- env["CONTENT_TYPE"] = ct
- cl = env.pop("HTTP_CONTENT_LENGTH", None)
- if cl is not None:
- env["CONTENT_LENGTH"] = cl
-
- if req.conn.ssl_env:
- env.update(req.conn.ssl_env)
-
- return env
-
-
-class WSGIGateway_u0(WSGIGateway_10):
- """A Gateway class to interface HTTPServer with WSGI u.0.
-
- WSGI u.0 is an experimental protocol, which uses unicode for keys and values
- in both Python 2 and Python 3.
- """
-
- def get_environ(self):
- """Return a new environ dict targeting the given wsgi.version"""
- req = self.req
- env_10 = WSGIGateway_10.get_environ(self)
- env = env_10.copy()
- env['wsgi.version'] = ('u', 0)
-
- # Request-URI
- env.setdefault('wsgi.url_encoding', 'utf-8')
- try:
- # SCRIPT_NAME is the empty string, who cares what encoding it is?
- env["PATH_INFO"] = req.path.decode(env['wsgi.url_encoding'])
- env["QUERY_STRING"] = req.qs.decode(env['wsgi.url_encoding'])
- except UnicodeDecodeError:
- # Fall back to latin 1 so apps can transcode if needed.
- env['wsgi.url_encoding'] = 'ISO-8859-1'
- env["PATH_INFO"] = env_10["PATH_INFO"]
- env["QUERY_STRING"] = env_10["QUERY_STRING"]
-
- return env
-
-wsgi_gateways = {
- (1, 0): WSGIGateway_10,
- ('u', 0): WSGIGateway_u0,
-}
-
-class WSGIPathInfoDispatcher(object):
- """A WSGI dispatcher for dispatch based on the PATH_INFO.
-
- apps: a dict or list of (path_prefix, app) pairs.
- """
-
- def __init__(self, apps):
- try:
- apps = list(apps.items())
- except AttributeError:
- pass
-
-        # Sort the apps by len(path), descending
-        apps.sort(key=lambda app: len(app[0]))
-        apps.reverse()
-
- # The path_prefix strings must start, but not end, with a slash.
- # Use "" instead of "/".
- self.apps = [(p.rstrip("/"), a) for p, a in apps]
-
- def __call__(self, environ, start_response):
- path = environ["PATH_INFO"] or "/"
- for p, app in self.apps:
- # The apps list should be sorted by length, descending.
- if path.startswith(p + "/") or path == p:
- environ = environ.copy()
- environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
- environ["PATH_INFO"] = path[len(p):]
- return app(environ, start_response)
-
- start_response('404 Not Found', [('Content-Type', 'text/plain'),
- ('Content-Length', '0')])
- return ['']
-
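For example (with `root_app` and `blog_app` as hypothetical WSGI callables), a request for /blog/2013 is routed to `blog_app` with SCRIPT_NAME '/blog' and PATH_INFO '/2013':

```python
dispatcher = WSGIPathInfoDispatcher({'/': root_app, '/blog': blog_app})
server = CherryPyWSGIServer(('0.0.0.0', 8070), dispatcher)
```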
diff --git a/python-packages/contextlib2.py b/python-packages/contextlib2.py
deleted file mode 100644
index 1fe5bc2782..0000000000
--- a/python-packages/contextlib2.py
+++ /dev/null
@@ -1,270 +0,0 @@
-"""contextlib2 - backports and enhancements to the contextlib module"""
-
-import sys
-from collections import deque
-from functools import wraps
-
-__all__ = ["contextmanager", "closing", "ContextDecorator",
- "ContextStack", "ExitStack"]
-
-
-class ContextDecorator(object):
- "A base class or mixin that enables context managers to work as decorators."
-
- def refresh_cm(self):
- """Returns the context manager used to actually wrap the call to the
- decorated function.
-
- The default implementation just returns *self*.
-
- Overriding this method allows otherwise one-shot context managers
- like _GeneratorContextManager to support use as decorators via
- implicit recreation.
- """
- return self
-
- def __call__(self, func):
- @wraps(func)
- def inner(*args, **kwds):
- with self.refresh_cm():
- return func(*args, **kwds)
- return inner
-
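A small sketch of the pattern this enables: one class usable both as a decorator and as a plain context manager.

```python
class tracked(ContextDecorator):
    def __enter__(self):
        print("entering")
        return self
    def __exit__(self, *exc):
        print("exiting")
        return False  # do not suppress exceptions

@tracked()
def do_work():
    print("working")

do_work()        # prints: entering / working / exiting
with tracked():  # the same class works as a plain context manager
    print("working")
```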
-
-class _GeneratorContextManager(ContextDecorator):
- """Helper for @contextmanager decorator."""
-
- def __init__(self, func, *args, **kwds):
- self.gen = func(*args, **kwds)
- self.func, self.args, self.kwds = func, args, kwds
-
- def refresh_cm(self):
- # _GCM instances are one-shot context managers, so the
- # CM must be recreated each time a decorated function is
- # called
- return self.__class__(self.func, *self.args, **self.kwds)
-
- def __enter__(self):
- try:
- return next(self.gen)
- except StopIteration:
- raise RuntimeError("generator didn't yield")
-
- def __exit__(self, type, value, traceback):
- if type is None:
- try:
- next(self.gen)
- except StopIteration:
- return
- else:
- raise RuntimeError("generator didn't stop")
- else:
- if value is None:
- # Need to force instantiation so we can reliably
- # tell if we get the same exception back
- value = type()
- try:
- self.gen.throw(type, value, traceback)
- raise RuntimeError("generator didn't stop after throw()")
- except StopIteration as exc:
- # Suppress the exception *unless* it's the same exception that
- # was passed to throw(). This prevents a StopIteration
- # raised inside the "with" statement from being suppressed
- return exc is not value
- except:
- # only re-raise if it's *not* the exception that was
- # passed to throw(), because __exit__() must not raise
- # an exception unless __exit__() itself failed. But throw()
- # has to raise the exception to signal propagation, so this
- # fixes the impedance mismatch between the throw() protocol
- # and the __exit__() protocol.
- #
- if sys.exc_info()[1] is not value:
- raise
-
-
-def contextmanager(func):
- """@contextmanager decorator.
-
- Typical usage:
-
- @contextmanager
- def some_generator():
-
- try:
- yield
- finally:
-
-
- This makes this:
-
- with some_generator() as :
-
-
- equivalent to this:
-
-
- try:
- =
-
- finally:
-
-
- """
- @wraps(func)
- def helper(*args, **kwds):
- return _GeneratorContextManager(func, *args, **kwds)
- return helper
-
-
-class closing(object):
- """Context to automatically close something at the end of a block.
-
- Code like this:
-
- with closing(.open()) as f:
-
-
- is equivalent to this:
-
- f = .open()
- try:
-
- finally:
- f.close()
-
- """
- def __init__(self, thing):
- self.thing = thing
- def __enter__(self):
- return self.thing
- def __exit__(self, *exc_info):
- self.thing.close()
-
-
-# Inspired by discussions on http://bugs.python.org/issue13585
-class ExitStack(object):
- """Context manager for dynamic management of a stack of exit callbacks
-
- For example:
-
- with ExitStack() as stack:
- files = [stack.enter_context(open(fname)) for fname in filenames]
- # All opened files will automatically be closed at the end of
- # the with statement, even if attempts to open files later
- # in the list throw an exception
-
- """
- def __init__(self):
- self._exit_callbacks = deque()
-
- def pop_all(self):
- """Preserve the context stack by transferring it to a new instance"""
- new_stack = type(self)()
- new_stack._exit_callbacks = self._exit_callbacks
- self._exit_callbacks = deque()
- return new_stack
-
- def _push_cm_exit(self, cm, cm_exit):
- """Helper to correctly register callbacks to __exit__ methods"""
- def _exit_wrapper(*exc_details):
- return cm_exit(cm, *exc_details)
- _exit_wrapper.__self__ = cm
- self.push(_exit_wrapper)
-
- def push(self, exit):
- """Registers a callback with the standard __exit__ method signature
-
- Can suppress exceptions the same way __exit__ methods can.
-
- Also accepts any object with an __exit__ method (registering the
- method instead of the object itself)
- """
- # We use an unbound method rather than a bound method to follow
- # the standard lookup behaviour for special methods
- _cb_type = type(exit)
- try:
- exit_method = _cb_type.__exit__
- except AttributeError:
-            # Not a context manager, so assume it's a callable
- self._exit_callbacks.append(exit)
- else:
- self._push_cm_exit(exit, exit_method)
- return exit # Allow use as a decorator
-
- def callback(self, callback, *args, **kwds):
- """Registers an arbitrary callback and arguments.
-
- Cannot suppress exceptions.
- """
- def _exit_wrapper(exc_type, exc, tb):
- callback(*args, **kwds)
- # We changed the signature, so using @wraps is not appropriate, but
- # setting __wrapped__ may still help with introspection
- _exit_wrapper.__wrapped__ = callback
- self.push(_exit_wrapper)
- return callback # Allow use as a decorator
-
- def enter_context(self, cm):
- """Enters the supplied context manager
-
- If successful, also pushes its __exit__ method as a callback and
- returns the result of the __enter__ method.
- """
- # We look up the special methods on the type to match the with statement
- _cm_type = type(cm)
- _exit = _cm_type.__exit__
- result = _cm_type.__enter__(cm)
- self._push_cm_exit(cm, _exit)
- return result
-
- def close(self):
- """Immediately unwind the context stack"""
- self.__exit__(None, None, None)
-
- def __enter__(self):
- return self
-
- def __exit__(self, *exc_details):
- if not self._exit_callbacks:
- return
- # This looks complicated, but it is really just
-        # setting up a chain of try-except statements to ensure
- # that outer callbacks still get invoked even if an
- # inner one throws an exception
- def _invoke_next_callback(exc_details):
- # Callbacks are removed from the list in FIFO order
- # but the recursion means they're invoked in LIFO order
- cb = self._exit_callbacks.popleft()
- if not self._exit_callbacks:
- # Innermost callback is invoked directly
- return cb(*exc_details)
- # More callbacks left, so descend another level in the stack
- try:
- suppress_exc = _invoke_next_callback(exc_details)
- except:
- suppress_exc = cb(*sys.exc_info())
- # Check if this cb suppressed the inner exception
- if not suppress_exc:
- raise
- else:
- # Check if inner cb suppressed the original exception
- if suppress_exc:
- exc_details = (None, None, None)
- suppress_exc = cb(*exc_details) or suppress_exc
- return suppress_exc
- # Kick off the recursive chain
- return _invoke_next_callback(exc_details)
-
-# Preserve backwards compatibility
-class ContextStack(ExitStack):
- """Backwards compatibility alias for ExitStack"""
-
- def register_exit(self, callback):
- return self.push(callback)
-
- def register(self, callback, *args, **kwds):
- return self.callback(callback, *args, **kwds)
-
- def preserve(self):
- return self.pop_all()
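A common use of pop_all() (a sketch): open several resources atomically, handing ownership of the cleanup to the caller only once every open has succeeded.

```python
def open_all(filenames):
    with ExitStack() as stack:
        files = [stack.enter_context(open(name)) for name in filenames]
        # Every open succeeded: transfer the close callbacks out of this
        # stack so the files survive the with block; if any open had
        # failed, the with block would have closed the earlier files.
        stack.pop_all()
        return files
```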
diff --git a/python-packages/django/conf/app_template/models.py b/python-packages/django/conf/app_template/models.py
deleted file mode 100644
index 71a8362390..0000000000
--- a/python-packages/django/conf/app_template/models.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from django.db import models
-
-# Create your models here.
diff --git a/python-packages/django/contrib/webdesign/models.py b/python-packages/django/contrib/webdesign/models.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/django_snippets/__init__.py b/python-packages/django_snippets/__init__.py
deleted file mode 100644
index 5cc550356c..0000000000
--- a/python-packages/django_snippets/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-
-VERSION = (1, 0, 1)
-
-# Dynamically calculate the version based on VERSION tuple
-if len(VERSION) > 2 and VERSION[2] is not None:
- if isinstance(VERSION[2], int):
- str_version = "%s.%s.%s" % VERSION[:3]
- else:
- str_version = "%s.%s_%s" % VERSION[:3]
-else:
- str_version = "%s.%s" % VERSION[:2]
-
-__version__ = str_version
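For illustration, the scheme above renders VERSION tuples as follows:

```python
# (1, 0, 1)      -> "1.0.1"
# (1, 0, 'beta') -> "1.0_beta"  (non-int third element joins with '_')
# (1, 0)         -> "1.0"
```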
diff --git a/python-packages/django_snippets/_mkdir.py b/python-packages/django_snippets/_mkdir.py
deleted file mode 100644
index 6c5c3fd4d1..0000000000
--- a/python-packages/django_snippets/_mkdir.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import os
-
-# http://code.activestate.com/recipes/82465-a-friendly-mkdir/
-def _mkdir(newdir):
- """works the way a good mkdir should :)
- - already exists, silently complete
- - regular file in the way, raise an exception
- - parent directory(ies) does not exist, make them as well
- """
- if os.path.isdir(newdir):
- pass
- elif os.path.isfile(newdir):
- raise OSError("a file with the same name as the desired " \
- "dir, '%s', already exists." % newdir)
- else:
- head, tail = os.path.split(newdir)
- if head and not os.path.isdir(head):
- _mkdir(head)
- if tail:
- os.mkdir(newdir)
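A usage sketch; behaviorally this mirrors os.makedirs, except that an already-existing directory completes silently instead of raising:

```python
_mkdir("build/output/logs")  # creates build, build/output, build/output/logs as needed
_mkdir("build/output/logs")  # already exists: silently completes
```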
diff --git a/python-packages/django_snippets/empty_choice_field.py b/python-packages/django_snippets/empty_choice_field.py
deleted file mode 100644
index 1df5228452..0000000000
--- a/python-packages/django_snippets/empty_choice_field.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-Modified from https://gist.github.com/davidbgk/651080
-via http://stackoverflow.com/questions/14541074/empty-label-choicefield-django
-"""
-from django import forms
-from django.utils.translation import ugettext as _
-
-class EmptyChoiceField(forms.ChoiceField):
- def __init__(self, choices, empty_label=_("(Please select a category)"), *args, **kwargs):
-
- # prepend an empty label
- choices = tuple([(u'', empty_label)] + list(choices))
-
- super(EmptyChoiceField, self).__init__(choices=choices, *args, **kwargs)
\ No newline at end of file
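A usage sketch (`CATEGORY_CHOICES` is a hypothetical choices tuple defined elsewhere):

```python
class ThreadForm(forms.Form):
    # The rendered <select> gets an initial blank option with the empty label.
    category = EmptyChoiceField(choices=CATEGORY_CHOICES, required=False)
```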
diff --git a/python-packages/django_snippets/jsonify.py b/python-packages/django_snippets/jsonify.py
deleted file mode 100644
index 347768d95b..0000000000
--- a/python-packages/django_snippets/jsonify.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from django.core.serializers import serialize
-from django.db.models.query import QuerySet
-from django.utils import simplejson
-from django.template import Library
-
-register = Library()
-
-def jsonify(object):
- if isinstance(object, QuerySet):
- return serialize('json', object)
- return simplejson.dumps(object)
-
-register.filter('jsonify', jsonify)
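Besides its registration as a template filter, the function can be called directly; `Video` is a hypothetical model:

```python
jsonify({"points": 10})       # -> '{"points": 10}'
jsonify(Video.objects.all())  # -> JSON list produced by Django's serializer
```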
diff --git a/python-packages/django_snippets/multiselect.py b/python-packages/django_snippets/multiselect.py
deleted file mode 100644
index a8d935554c..0000000000
--- a/python-packages/django_snippets/multiselect.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# taken from http://djangosnippets.org/snippets/1200/
-from django import forms
-from django.contrib.humanize.templatetags.humanize import apnumber
-from django.core.exceptions import ValidationError
-from django.db import models
-from django.template.defaultfilters import pluralize
-from django.utils.text import capfirst
-
-
-class MultiSelectFormField(forms.MultipleChoiceField):
- widget = forms.CheckboxSelectMultiple
-
- def __init__(self, *args, **kwargs):
- self.max_choices = kwargs.pop('max_choices', 0)
- super(MultiSelectFormField, self).__init__(*args, **kwargs)
-
- def clean(self, value):
- if not value and self.required:
- raise forms.ValidationError(self.error_messages['required'])
- if value and self.max_choices and len(value) > self.max_choices:
- raise forms.ValidationError('You must select a maximum of %s choice%s.'
- % (apnumber(self.max_choices), pluralize(self.max_choices)))
- return value
-
-class MultiSelectField(models.Field):
- __metaclass__ = models.SubfieldBase
-
- def get_internal_type(self):
- return "CharField"
-
- def get_choices_default(self):
- return self.get_choices(include_blank=False)
-
-    def _get_FIELD_display(self, field):
-        value = getattr(self, field.attname)
-        choicedict = dict(field.choices)
-        # Return the display labels for the stored values (mirrors the
-        # get_FOO_display lambda installed by contribute_to_class below).
-        return ",".join([choicedict.get(v, v) for v in value])
-
- def formfield(self, **kwargs):
- # don't call super, as that overrides default widget if it has choices
- defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name),
- 'help_text': self.help_text, 'choices':self.choices}
- if self.has_default():
- defaults['initial'] = self.get_default()
- defaults.update(kwargs)
- return MultiSelectFormField(**defaults)
-
- def get_db_prep_value(self, value, **kwargs): # needed to interact with older versions of django
- if isinstance(value, basestring):
- return value
- elif isinstance(value, list):
- return ",".join(value)
-
- def value_to_string(self, obj):
- value = self._get_val_from_obj(obj)
- return self.get_db_prep_value(value)
-
-    def to_python(self, value):
-        if isinstance(value, list):
-            return value
-        elif value is None:
-            return ''
-        return value.split(",")
-
- def contribute_to_class(self, cls, name):
- super(MultiSelectField, self).contribute_to_class(cls, name)
- if self.choices:
- func = lambda self, fieldname = name, choicedict = dict(self.choices):",".join([choicedict.get(value,value) for value in getattr(self,fieldname)])
- setattr(cls, 'get_%s_display' % self.name, func)
-
- def validate(self, value, model_instance):
- """
- Extension to properly validate.
- """
- assert self.choices, "Choices must be set."
- if value:
- # Make sure all values are in the acceptable set
- set_diff = set(value) - set([c[0] for c in self.choices])
- if set_diff:
- raise ValidationError("Unrecognized choices: %s" % set_diff)
-
-
-# Bcipolli: I added this to make database migrations work.
-#
-# See: http://south.aeracode.org/wiki/MyFieldsDontWork
-from south.modelsinspector import add_introspection_rules
-add_introspection_rules([], ["^django_snippets\.multiselect\.MultiSelectField"])
diff --git a/python-packages/django_snippets/profiling_middleware.py b/python-packages/django_snippets/profiling_middleware.py
deleted file mode 100644
index a191ef32a7..0000000000
--- a/python-packages/django_snippets/profiling_middleware.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Adding a middleware function
-# Original version taken from http://www.djangosnippets.org/snippets/186/
-# Original author: udfalkso
-# Modified by: Shwagroo Team and Gun.io
-
-import sys
-import os
-import re
-import hotshot, hotshot.stats
-import tempfile
-import StringIO
-
-from django.conf import settings
-
-
-words_re = re.compile( r'\s+' )
-
-group_prefix_re = [
- re.compile( "^.*/django/[^/]+" ),
- re.compile( "^(.*)/[^/]+$" ), # extract module path
- re.compile( ".*" ), # catch strange entries
-]
-
-class ProfileMiddleware(object):
- """
- Displays hotshot profiling for any view.
- http://yoursite.com/yourview/?prof
-
- Add the "prof" key to query string by appending ?prof (or &prof=)
- and you'll see the profiling results in your browser.
-    It's only available in Django's debug mode (or to superusers otherwise),
-    but you really shouldn't add this middleware to any production configuration.
-
- WARNING: It uses hotshot profiler which is not thread safe.
- """
- def process_request(self, request):
- if (settings.DEBUG or request.user.is_superuser) and 'prof' in request.GET:
- self.tmpfile = tempfile.mktemp()
- self.prof = hotshot.Profile(self.tmpfile)
-
- def process_view(self, request, callback, callback_args, callback_kwargs):
- if (settings.DEBUG or request.user.is_superuser) and 'prof' in request.GET:
- return self.prof.runcall(callback, request, *callback_args, **callback_kwargs)
-
- def get_group(self, file):
- for g in group_prefix_re:
- name = g.findall( file )
- if name:
- return name[0]
-
- def get_summary(self, results_dict, sum):
- list = [ (item[1], item[0]) for item in results_dict.items() ]
- list.sort( reverse = True )
- list = list[:40]
-
- res = " tottime\n"
- for item in list:
- res += "%4.1f%% %7.3f %s\n" % ( 100*item[0]/sum if sum else 0, item[0], item[1] )
-
- return res
-
- def summary_for_files(self, stats_str):
- stats_str = stats_str.split("\n")[5:]
-
- mystats = {}
- mygroups = {}
-
- sum = 0
-
- for s in stats_str:
-            fields = words_re.split(s)
- if len(fields) == 7:
- time = float(fields[2])
- sum += time
- file = fields[6].split(":")[0]
-
- if not file in mystats:
- mystats[file] = 0
- mystats[file] += time
-
- group = self.get_group(file)
- if not group in mygroups:
- mygroups[ group ] = 0
- mygroups[ group ] += time
-
- return "" + \
- " ---- By file ----\n\n" + self.get_summary(mystats,sum) + "\n" + \
- " ---- By group ---\n\n" + self.get_summary(mygroups,sum) + \
- " "
-
- def process_response(self, request, response):
- if (settings.DEBUG or (hasattr(request, "user") and request.user.is_superuser)) and 'prof' in request.GET:
- self.prof.close()
-
- out = StringIO.StringIO()
- old_stdout = sys.stdout
- sys.stdout = out
-
- stats = hotshot.stats.load(self.tmpfile)
- stats.sort_stats('time', 'calls')
- stats.print_stats()
-
- sys.stdout = old_stdout
- stats_str = out.getvalue()
-
- if response and response.content and stats_str:
- response.content = "" + stats_str + " "
-
- response.content = "\n".join(response.content.split("\n")[:40])
-
- response.content += self.summary_for_files(stats_str)
-
- os.unlink(self.tmpfile)
-
- return response
\ No newline at end of file
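Wiring it up is a settings change plus a query-string flag (a sketch; never enable this in production):

```python
# settings.py
MIDDLEWARE_CLASSES += (
    'django_snippets.profiling_middleware.ProfileMiddleware',
)
# Then request any view with ?prof appended:
#   http://localhost:8000/some/view/?prof
```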
diff --git a/python-packages/django_snippets/session_timeout_middleware.py b/python-packages/django_snippets/session_timeout_middleware.py
deleted file mode 100644
index 92973ddf92..0000000000
--- a/python-packages/django_snippets/session_timeout_middleware.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from django.contrib.auth import logout
-from django.contrib import messages
-import datetime
-
-from django.conf import settings
-from django.core.urlresolvers import reverse
-from django.http import HttpResponse, HttpResponseRedirect
-
-
-class SessionIdleTimeout:
- """
- Middleware class to timeout a session after a specified time period.
- Modified from:
- https://github.com/subhranath/django-session-idle-timeout
- """
- def process_request(self, request):
- # Only do timeout if enabled
- if settings.SESSION_IDLE_TIMEOUT:
- # Timeout is done only for authenticated logged in *student* users.
- # if (request.user.is_authenticated() or "facility_user" in request.session) and not request.is_admin:
- if request.is_student:
- current_datetime = datetime.datetime.now()
-
- # Timeout if idle time period is exceeded.
- # seconds =
- if ('last_activity' in request.session and
- (current_datetime - request.session['last_activity']).seconds >
- settings.SESSION_IDLE_TIMEOUT):
- logout(request)
- messages.add_message(request, messages.ERROR, 'Your session has been timed out')
-
- if request.is_ajax():
- response = HttpResponse(status=401)
- else:
- # Redirect to the login page if session has timed-out.
- redirect_to = request.path + "?login"
- response = HttpResponseRedirect(redirect_to)
- return response
- else:
- # Set last activity time in current session.
- request.session['last_activity'] = current_datetime
-
- return None
\ No newline at end of file
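A configuration sketch; the middleware is a no-op unless SESSION_IDLE_TIMEOUT is truthy:

```python
# settings.py
SESSION_IDLE_TIMEOUT = 15 * 60  # seconds of allowed idle time; 0/None disables

MIDDLEWARE_CLASSES += (
    'django_snippets.session_timeout_middleware.SessionIdleTimeout',
)
```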
diff --git a/python-packages/fle_utils/build/management/commands/generate_blacklist.py b/python-packages/fle_utils/build/management/commands/generate_blacklist.py
deleted file mode 100644
index d026af929e..0000000000
--- a/python-packages/fle_utils/build/management/commands/generate_blacklist.py
+++ /dev/null
@@ -1,150 +0,0 @@
-import fnmatch
-import os
-import sys
-import warnings
-from datetime import datetime
-from threading import Thread
-from time import sleep, time
-from optparse import make_option
-
-from django.conf import settings; logging = settings.LOG
-from django.core.management.base import BaseCommand, CommandError
-from django.core.management import call_command
-from django.utils.importlib import import_module
-from django.utils._os import upath
-
-KA_LITE_PATH = os.path.abspath(os.path.join(settings.PROJECT_PATH, ".."))
-
-
-def make_path_relative(path):
- if path.startswith(KA_LITE_PATH):
- path = path[len(KA_LITE_PATH)+1:]
- return path
-
-
-def get_app_subdirectory_paths(subdir):
- paths = []
- for appname in settings.INSTALLED_APPS:
- app = import_module(appname)
- subdirpath = os.path.join(os.path.dirname(upath(app.__file__)), subdir)
- if os.path.exists(subdirpath):
- # paths.append(make_path_relative(subdirpath))
- paths.append(subdirpath)
- return paths
-
-
-def get_paths_matching_pattern(pattern, starting_directory=KA_LITE_PATH):
- paths = []
-    for root, dirs, files in os.walk(starting_directory):
- # root = make_path_relative(root)
- paths += [os.path.join(root, d) for d in dirs if fnmatch.fnmatch(d, pattern)]
- paths += [os.path.join(root, f) for f in files if fnmatch.fnmatch(f, pattern)]
- return paths
-
-
-def get_paths_ending_with(substring, starting_directory=KA_LITE_PATH):
- paths = []
-    for root, dirs, files in os.walk(starting_directory):
- # root = make_path_relative(root)
- paths += [os.path.join(root, d) for d in dirs if os.path.join(root, d).endswith(substring)]
- paths += [os.path.join(root, f) for f in files if os.path.join(root, f).endswith(substring)]
- return paths
-
-
-def get_blacklist(removeunused=False, exclude_patterns=[], removestatic=False, removetests=False, removei18n=False, removekhan=False, **kwargs):
-
- blacklist = []
-
- if removeunused:
- blacklist += get_paths_ending_with("perseus/src")
- blacklist += get_paths_matching_pattern(".git")
- blacklist += get_paths_matching_pattern(".gitignore")
- blacklist += get_paths_matching_pattern("requirements.txt")
- blacklist += [
- "python-packages/postmark"
- "python-packages/fle_utils/feeds",
- "python-packages/announcements",
- "python-packages/tastypie/templates",
- "python-packages/tastypie/management",
- "python-packages/django/contrib/admindocs",
- "python-packages/django/contrib/flatpages",
- # "python-packages/django/contrib/sitemaps",
- "python-packages/django/contrib/comments",
- ]
- # don't need i18n stuff for django admin, since nobody should be seeing it
- blacklist += get_paths_matching_pattern("locale", starting_directory=os.path.join(KA_LITE_PATH, "python-packages/django"))
-
- for pattern in exclude_patterns:
- blacklist += get_paths_matching_pattern(pattern)
-
- if removestatic:
- blacklist += get_app_subdirectory_paths("static")
-
- if removetests:
- blacklist += get_app_subdirectory_paths("tests")
- # blacklist += get_app_subdirectory_paths("tests.py")
- blacklist += get_paths_matching_pattern("__tests__")
- blacklist += [
- "kalite/static/khan-exercises/test",
- "python-packages/selenium",
- "kalite/testing",
- ]
-
- if removei18n:
- blacklist += get_paths_matching_pattern("locale")
- blacklist += get_paths_matching_pattern("localeplanet")
- blacklist += get_paths_matching_pattern("*.po")
- blacklist += get_paths_matching_pattern("*.mo")
- blacklist += get_paths_ending_with("jquery-ui/i18n")
-
- if removekhan:
- blacklist += get_paths_matching_pattern("khan-exercises")
- blacklist += get_paths_matching_pattern("perseus")
-
- # I want my paths absolute
- blacklist = [os.path.abspath(os.path.join("..", path)) for path in blacklist]
- return blacklist
-
-
-class Command(BaseCommand):
- args = ""
- help = "Outputs a blacklist of files not to include when distributing for production."
-
- option_list = BaseCommand.option_list + (
- make_option('', '--removeunused',
- action='store_true',
- dest='removeunused',
- default=False,
- help='Exclude a number of files not currently being used at all'),
- make_option('-e', '--exclude',
- action='append',
- dest='exclude_patterns',
- default=[],
- help='Exclude files matching a pattern (e.g. with a certain extension). Can be repeated to include multiple patterns.'),
- make_option('', '--removestatic',
- action='store_true',
- dest='removestatic',
- default=False,
- help='Exclude static files in INSTALLED_APPS (be sure to run collectstatic)'),
- make_option('', '--removetests',
- action='store_true',
- dest='removetests',
- default=False,
- help='Exclude tests folders in INSTALLED_APPS'),
- make_option('', '--removei18n',
- action='store_true',
- dest='removei18n',
- default=False,
- help='Exclude locale and other i18n files/folders'),
- make_option('', '--removekhan',
- action='store_true',
- dest='removekhan',
- default=False,
- help='Exclude khan-exercises, perseus, and other KA-specific stuff'),
- )
-
-    def handle(self, *args, **options):
-
- print "\n".join(get_blacklist(**options))
-
- # python -O /usr/lib/python2.6/compileall.py .
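The command can also be driven programmatically (a sketch; the keyword names match the option dest values above):

```python
from django.core.management import call_command

# Equivalent to: python manage.py generate_blacklist --removetests -e "*.pyc"
call_command('generate_blacklist', removetests=True, exclude_patterns=['*.pyc'])
```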
diff --git a/python-packages/fle_utils/chronograph/management/__init__.py b/python-packages/fle_utils/chronograph/management/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/fle_utils/chronograph/management/commands/__init__.py b/python-packages/fle_utils/chronograph/management/commands/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/fle_utils/chronograph/migrations/__init__.py b/python-packages/fle_utils/chronograph/migrations/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/fle_utils/chronograph/settings.py b/python-packages/fle_utils/chronograph/settings.py
deleted file mode 100644
index 9846f9ff6a..0000000000
--- a/python-packages/fle_utils/chronograph/settings.py
+++ /dev/null
@@ -1,11 +0,0 @@
-try:
- from kalite import local_settings
-except ImportError:
- local_settings = object()
-
-
-########################
-# Set module settings
-########################
-
-CRONSERVER_FREQUENCY = getattr(local_settings, "CRONSERVER_FREQUENCY", 600) # 10 mins (in seconds)
diff --git a/python-packages/fle_utils/config/migrations/__init__.py b/python-packages/fle_utils/config/migrations/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/fle_utils/deployments/__init__.py b/python-packages/fle_utils/deployments/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/fle_utils/deployments/migrations/__init__.py b/python-packages/fle_utils/deployments/migrations/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/fle_utils/deployments/tests.py b/python-packages/fle_utils/deployments/tests.py
deleted file mode 100644
index 501deb776c..0000000000
--- a/python-packages/fle_utils/deployments/tests.py
+++ /dev/null
@@ -1,16 +0,0 @@
-"""
-This file demonstrates writing tests using the unittest module. These will pass
-when you run "manage.py test".
-
-Replace this with more appropriate tests for your application.
-"""
-
-from django.test import TestCase
-
-
-class SimpleTest(TestCase):
- def test_basic_addition(self):
- """
- Tests that 1 + 1 always equals 2.
- """
- self.assertEqual(1 + 1, 2)
diff --git a/python-packages/fle_utils/django_utils/__init__.py b/python-packages/fle_utils/django_utils/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/fle_utils/django_utils/middleware.py b/python-packages/fle_utils/django_utils/middleware.py
deleted file mode 100644
index 9f126e2943..0000000000
--- a/python-packages/fle_utils/django_utils/middleware.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""
-"""
-from django.conf import settings
-
-
-class GetNextParam:
- def process_request(self, request):
- next = request.GET.get("next", "")
- request.next = (next.startswith("/") and next) or ""
-
-
-class JsonAsHTML(object):
- '''
- View a JSON response in your browser as HTML
- Useful for viewing stats using Django Debug Toolbar
-
-    This middleware should be placed AFTER the Django Debug Toolbar middleware.
- '''
-
- def process_response(self, request, response):
-
-        # not for production or production-like environments
- if not settings.DEBUG:
- return response
-
- #do nothing for actual ajax requests
- if request.is_ajax():
- return response
-
- #only do something if this is a json response
- if "application/json" in response['Content-Type'].lower():
- title = "JSON as HTML Middleware for: %s" % request.get_full_path()
- response.content = "%s%s" % (title, response.content)
- response['Content-Type'] = 'text/html'
- return response
\ No newline at end of file
diff --git a/python-packages/fle_utils/django_utils/templatetags/__init__.py b/python-packages/fle_utils/django_utils/templatetags/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/fle_utils/internet/__init__.py b/python-packages/fle_utils/internet/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/fle_utils/testing/__init__.py b/python-packages/fle_utils/testing/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/fle_utils/testing/code_testing.py b/python-packages/fle_utils/testing/code_testing.py
deleted file mode 100644
index 7a599abfc5..0000000000
--- a/python-packages/fle_utils/testing/code_testing.py
+++ /dev/null
@@ -1,222 +0,0 @@
-import copy
-import glob
-import importlib
-import os
-import re
-
-from django.conf import settings; logging = settings.LOG
-from django.utils import unittest
-
-
-def get_module_files(module_dirpath, file_filter_fn):
- source_files = []
- for root, dirs, files in os.walk(module_dirpath): # Recurse over all files
- source_files += [os.path.join(root, f) for f in files if file_filter_fn(f)] # Filter py files
- return source_files
-
-class FLECodeTest(unittest.TestCase):
- testable_packages = []
-
- def __init__(self, *args, **kwargs):
- """ """
- super(FLECodeTest, self).__init__(*args, **kwargs)
- if not hasattr(self.__class__, 'our_apps'):
- self.__class__.our_apps = set([app for app in settings.INSTALLED_APPS if app in self.testable_packages or app.split('.')[0] in self.testable_packages])
- self.__class__.compute_app_dependencies()
- self.__class__.compute_app_urlpatterns()
-
- @classmethod
- def compute_app_dependencies(cls):
- """For each app in settings.INSTALLED_APPS, load that app's settings.py to grab its dependencies
- from its own INSTALLED_APPS.
-
- Note: assumes cls.our_apps has already been computed.
- """
- cls.our_app_dependencies = {}
-
- # Get each app's dependencies.
- for app in cls.our_apps:
- module = importlib.import_module(app)
- module_dirpath = os.path.dirname(module.__file__)
- settings_filepath = os.path.join(module_dirpath, 'settings.py')
-
- if not os.path.exists(settings_filepath):
- our_app_dependencies = []
- else:
-                # Load the settings.py file. This requires setting some (expected) global variables,
- # such as PROJECT_PATH and ROOT_DATA_PATH, such that the scripts execute stand-alone
- # TODO: make these scripts execute stand-alone.
- global_vars = copy.copy(globals())
- global_vars.update({
- "__file__": settings_filepath, # must let the app's settings file be set to that file!
- 'PROJECT_PATH': settings.PROJECT_PATH,
- 'ROOT_DATA_PATH': getattr(settings, 'ROOT_DATA_PATH', os.path.join(settings.PROJECT_PATH, 'data')),
- })
- app_settings = {'__package__': app} # explicit setting of the __package__, to allow absolute package ref'ing
- execfile(settings_filepath, global_vars, app_settings)
- our_app_dependencies = [anapp for anapp in app_settings.get('INSTALLED_APPS', []) if anapp in cls.our_apps]
-
- cls.our_app_dependencies[app] = our_app_dependencies
-
-
- @classmethod
- def get_fle_imports(cls, app):
- """Recurses over files within an app, searches each file for KA Lite-relevant imports,
- then grabs the fully-qualified module import for each import on each line.
-
- The logic is hacky and makes assumptions (no multi-line imports, but handles comma-delimited import lists),
- but generally works.
-
- Returns a dict of tuples
- key: filepath
- value: (actual code line, reconstructed import)
- """
- module = importlib.import_module(app)
- module_dirpath = os.path.dirname(module.__file__)
-
- imports = {}
-
- py_files = get_module_files(module_dirpath, lambda f: os.path.splitext(f)[-1] in ['.py'])
- for filepath in py_files:
- lines = open(filepath, 'r').readlines() # Read the entire file
- import_lines = [l.strip() for l in lines if 'import' in l] # Grab lines containing 'import'
- our_import_lines = []
- for import_line in import_lines:
- for rexp in [r'^\s*from\s+(.*)\s+import\s+(.*)\s*$', r'^\s*import\s+(.*)\s*$']: # Match 'import X' and 'from A import B' syntaxes
- matches = re.match(rexp, import_line)
- groups = matches and list(matches.groups()) or []
- import_mod = []
- for list_item in ((groups and groups[-1].split(",")) or []): # Takes the last item (which get split into a CSV list)
- cur_item = '.'.join([item.strip() for item in (groups[0:-1] + [list_item])]) # Reconstitute to fully-qualified import
- if any([a for a in cls.our_apps if a in cur_item]): # Search for the app in all the apps we know matter
- our_import_lines.append((import_line, cur_item)) # Store line and import item as a tuple
- if app in cur_item: # Special case: warn if fully qualified import within an app (should be relative)
- logging.warn("*** Please use relative imports within an app (%s: found '%s')" % (app, import_line))
- else: # Not a relevant / tracked import
- logging.debug("*** Skipping import: %s (%s)" % (import_line, cur_item))
- imports[filepath] = our_import_lines
- return imports
-
- # @unittest.skipIf(settings.RUNNING_IN_TRAVIS, "Skipping import tests until we get them all passing locally.")
- # def test_imports(self):
- # """For each installed app, gets all FLE imports within the code.
- # Then checks intended dependencies (via the app's settings.py:INSTALLED_APPS)
- # and looks for differences.
-
- # A single assert is done after recursing (and accumulating errors) over all apps.
- # """
-
- # bad_imports = {}
- # for app, app_dependencies in self.our_app_dependencies.iteritems():
- # imports = self.__class__.get_fle_imports(app)
-
- # # Don't include [app] in search; we want all such imports to be relative.
- # bad_imports[app] = [str((f, i[0])) for f, ins in imports.iteritems() for i in ins if not any([a for a in app_dependencies if a in i[1]])]
-
- # # Join the bad imports together into a user-meaningful string.
- # bad_imports_text = "\n\n".join(["%s:\n%s\n%s" % (app, "\n".join(self.our_app_dependencies[app]), "\n".join(bad_imports[app])) for app in bad_imports if bad_imports[app]])
- # self.assertFalse(any([app for app, bi in bad_imports.iteritems() if bi]), "Found unreported app dependencies in imports:\n%s" % bad_imports_text)
-
-
- @classmethod
- def compute_app_urlpatterns(cls):
- """For each app in settings.INSTALLED_APPS, load that app's *urls.py to grab its
- defined URLS.
-
- Note: assumes cls.our_apps has already been computed.
- """
- cls.app_urlpatterns = {}
-
- # Get each app's dependencies.
- for app in cls.our_apps:
- module = importlib.import_module(app)
- module_dirpath = os.path.dirname(module.__file__)
- settings_filepath = os.path.join(module_dirpath, 'settings.py')
-
- urlpatterns = []
- source_files = get_module_files(module_dirpath, lambda f: 'urls' in f and os.path.splitext(f)[-1] in ['.py'])
- for filepath in source_files:
- fq_urlconf_module = app + os.path.splitext(filepath[len(module_dirpath):])[0].replace('/', '.')
-
- logging.info('Processing urls file: %s' % fq_urlconf_module)
- mod = importlib.import_module(fq_urlconf_module)
- urlpatterns += mod.urlpatterns
-
- cls.app_urlpatterns[app] = urlpatterns
-
-
- @classmethod
- def get_url_reversals(cls, app):
- """Recurses over files within an app, searches each file for KA Lite-relevant URL confs,
- then grabs the fully-qualified module import for each import on each line.
-
- The logic is hacky and makes assumptions (no multi-line imports, but handles comma-delimited import lists),
- but generally works.
-
- Returns a dict of tuples
- key: filepath
- value: (actual code line, reconstructed import)
- """
-
- module = importlib.import_module(app)
- module_dirpath = os.path.dirname(module.__file__)
-
- url_reversals = {}
-
- source_files = get_module_files(module_dirpath, lambda f: os.path.splitext(f)[-1] in ['.py', '.html'])
- for filepath in source_files:
- mod_revs = []
- for line in open(filepath, 'r').readlines():
- new_revs = []
- for rexp in [r""".*reverse\(\s*['"]([^\)\s,]+)['"].*""", r""".*\{%\s*url\s+['"]([^%\s]+)['"].*"""]: # Match 'reverse(URI)' and '{% url URI %}' syntaxes
-
- matches = re.match(rexp, line)
- groups = matches and list(matches.groups()) or []
- if groups:
- new_revs += groups
- logging.debug('Found: %s; %s' % (filepath, line))
-
- if not new_revs and ('reverse(' in line or '{% url' in line):
- logging.debug("\tSkip: %s; %s" % (filepath, line))
- mod_revs += new_revs
-
- url_reversals[filepath] = mod_revs
- return url_reversals
-
-
- @classmethod
- def get_url_modules(cls, url_name):
- """Given a URL name, returns all INSTALLED_APPS that have that URL name defined within the app."""
-
- # Search patterns across all known apps that are named have that name.
- found_modules = [app for app, pats in cls.app_urlpatterns.iteritems() for pat in pats if getattr(pat, "name", None) == url_name]
- return found_modules
-
- # @unittest.skipIf(settings.RUNNING_IN_TRAVIS, "Skipping import tests until we get them all passing locally.")
- # def test_url_reversals(self):
- # """Finds all URL reversals that aren't found within the defined INSTALLED_APPS dependencies"""
- # bad_reversals = {}
-
- # for app, app_dependencies in self.our_app_dependencies.iteritems():
- # url_names_by_file = self.__class__.get_url_reversals(app)
- # url_names = [pat for pat_list in url_names_by_file.values() for pat in pat_list] # Flatten into list (not per-file)
-
- # # Clean names
- # url_names = [n for n in url_names if n and not n.startswith('admin:')] # Don't deal with admin URLs.
- # url_names = [n for n in url_names if n and not '.' in n] # eliminate fully-qualified url names
- # url_names = set(url_names) # Eliminate duplicates
-
- # # for each referenced url name, make sure this app defin
- # bad_reversals[app] = []
- # for url_name in url_names:
- # referenced_modules = set(self.get_url_modules(url_name))
- # if not referenced_modules.intersection(set([app] + app_dependencies)):
- # bad_reversals[app].append((url_name, list(referenced_modules)))
-
- # bad_reversals_text = "\n\n".join(["%s: unexpected dependencies found!\n\t(url_name [module that defines url_name])\n\t%s\nExpected dependencies:\n\t%s" % (
- # app,
- # "\n\t".join([str(t) for t in bad_reversals[app]]),
- # "\n\t".join(self.our_app_dependencies[app]),
- # ) for app in bad_reversals if bad_reversals[app]])
- # self.assertFalse(any([app for app, bi in bad_reversals.iteritems() if bi]), "Found unreported app dependencies in URL reversals:\n%s" % bad_reversals_text)
-
diff --git a/python-packages/fle_utils/testing/management/__init__.py b/python-packages/fle_utils/testing/management/__init__.py
deleted file mode 100755
index e69de29bb2..0000000000
diff --git a/python-packages/fle_utils/testing/management/commands/__init__.py b/python-packages/fle_utils/testing/management/commands/__init__.py
deleted file mode 100755
index e69de29bb2..0000000000
diff --git a/python-packages/httplib2/__init__.py b/python-packages/httplib2/__init__.py
deleted file mode 100755
index 9780d4e54c..0000000000
--- a/python-packages/httplib2/__init__.py
+++ /dev/null
@@ -1,1657 +0,0 @@
-from __future__ import generators
-"""
-httplib2
-
-A caching http interface that supports ETags and gzip
-to conserve bandwidth.
-
-Requires Python 2.3 or later
-
-Changelog:
-2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
-
-"""
-
-__author__ = "Joe Gregorio (joe@bitworking.org)"
-__copyright__ = "Copyright 2006, Joe Gregorio"
-__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)",
- "James Antill",
- "Xavier Verges Farrero",
- "Jonathan Feinberg",
- "Blair Zajac",
- "Sam Ruby",
- "Louis Nyffenegger"]
-__license__ = "MIT"
-__version__ = "0.8"
-
-import re
-import sys
-import email
-import email.Utils
-import email.Message
-import email.FeedParser
-import StringIO
-import gzip
-import zlib
-import httplib
-import urlparse
-import urllib
-import base64
-import os
-import copy
-import calendar
-import time
-import random
-import errno
-try:
- from hashlib import sha1 as _sha, md5 as _md5
-except ImportError:
- # prior to Python 2.5, these were separate modules
- import sha
- import md5
- _sha = sha.new
- _md5 = md5.new
-import hmac
-from gettext import gettext as _
-import socket
-
-try:
- from httplib2 import socks
-except ImportError:
- try:
- import socks
- except (ImportError, AttributeError):
- socks = None
-
-# Build the appropriate socket wrapper for ssl
-try:
- import ssl # python 2.6
- ssl_SSLError = ssl.SSLError
- def _ssl_wrap_socket(sock, key_file, cert_file,
- disable_validation, ca_certs):
- if disable_validation:
- cert_reqs = ssl.CERT_NONE
- else:
- cert_reqs = ssl.CERT_REQUIRED
- # We should be specifying SSL version 3 or TLS v1, but the ssl module
- # doesn't expose the necessary knobs. So we need to go with the default
- # of SSLv23.
- return ssl.wrap_socket(sock, keyfile=key_file, certfile=cert_file,
- cert_reqs=cert_reqs, ca_certs=ca_certs)
-except (AttributeError, ImportError):
- ssl_SSLError = None
- def _ssl_wrap_socket(sock, key_file, cert_file,
- disable_validation, ca_certs):
- if not disable_validation:
- raise CertificateValidationUnsupported(
- "SSL certificate validation is not supported without "
- "the ssl module installed. To avoid this error, install "
- "the ssl module, or explicity disable validation.")
- ssl_sock = socket.ssl(sock, key_file, cert_file)
- return httplib.FakeSocket(sock, ssl_sock)
-
-
-if sys.version_info >= (2,3):
- from iri2uri import iri2uri
-else:
- def iri2uri(uri):
- return uri
-
-def has_timeout(timeout): # python 2.6
- if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
- return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT)
- return (timeout is not None)
-
-__all__ = [
- 'Http', 'Response', 'ProxyInfo', 'HttpLib2Error', 'RedirectMissingLocation',
- 'RedirectLimit', 'FailedToDecompressContent',
- 'UnimplementedDigestAuthOptionError',
- 'UnimplementedHmacDigestAuthOptionError',
- 'debuglevel', 'ProxiesUnavailableError']
-
-
-# The httplib debug level, set to a non-zero value to get debug output
-debuglevel = 0
-
-# A request will be tried 'RETRIES' times if it fails at the socket/connection level.
-RETRIES = 2
-
-# Python 2.3 support
-if sys.version_info < (2,4):
- def sorted(seq):
- seq.sort()
- return seq
-
-# Python 2.3 support
-def HTTPResponse__getheaders(self):
- """Return list of (header, value) tuples."""
- if self.msg is None:
- raise httplib.ResponseNotReady()
- return self.msg.items()
-
-if not hasattr(httplib.HTTPResponse, 'getheaders'):
- httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
-
-# All exceptions raised here derive from HttpLib2Error
-class HttpLib2Error(Exception): pass
-
-# Some exceptions can be caught and optionally
-# be turned back into responses.
-class HttpLib2ErrorWithResponse(HttpLib2Error):
- def __init__(self, desc, response, content):
- self.response = response
- self.content = content
- HttpLib2Error.__init__(self, desc)
-
-class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
-class RedirectLimit(HttpLib2ErrorWithResponse): pass
-class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
-class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
-class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
-
-class MalformedHeader(HttpLib2Error): pass
-class RelativeURIError(HttpLib2Error): pass
-class ServerNotFoundError(HttpLib2Error): pass
-class ProxiesUnavailableError(HttpLib2Error): pass
-class CertificateValidationUnsupported(HttpLib2Error): pass
-class SSLHandshakeError(HttpLib2Error): pass
-class NotSupportedOnThisPlatform(HttpLib2Error): pass
-class CertificateHostnameMismatch(SSLHandshakeError):
- def __init__(self, desc, host, cert):
- HttpLib2Error.__init__(self, desc)
- self.host = host
- self.cert = cert
-
-# Open Items:
-# -----------
-# Proxy support
-
-# Are we removing the cached content too soon on PUT (only delete on 200, maybe?)
-
-# Pluggable cache storage (supports storing the cache in
-# flat files by default. We need a plug-in architecture
-# that can support Berkeley DB and Squid)
-
-# == Known Issues ==
-# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
-# Does not handle Cache-Control: max-stale
-# Does not use Age: headers when calculating cache freshness.
-
-
-# The number of redirections to follow before giving up.
-# Note that only GET redirects are automatically followed.
-# Will also honor 301 requests by saving that info and never
-# requesting that URI again.
-DEFAULT_MAX_REDIRECTS = 5
-
-try:
- # Users can optionally provide a module that tells us where the CA_CERTS
- # are located.
- import ca_certs_locater
- CA_CERTS = ca_certs_locater.get()
-except ImportError:
- # Default CA certificates file bundled with httplib2.
- CA_CERTS = os.path.join(
- os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt")
-
-# Which headers are hop-by-hop headers by default
-HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
-
-def _get_end2end_headers(response):
- hopbyhop = list(HOP_BY_HOP)
- hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
- return [header for header in response.keys() if header not in hopbyhop]
-
-URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
-
-def parse_uri(uri):
- """Parses a URI using the regex given in Appendix B of RFC 3986.
-
- (scheme, authority, path, query, fragment) = parse_uri(uri)
- """
- groups = URI.match(uri).groups()
- return (groups[1], groups[3], groups[4], groups[6], groups[8])
-
-def urlnorm(uri):
- (scheme, authority, path, query, fragment) = parse_uri(uri)
- if not scheme or not authority:
- raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
- authority = authority.lower()
- scheme = scheme.lower()
- if not path:
- path = "/"
- # Could do syntax based normalization of the URI before
- # computing the digest. See Section 6.2.2 of Std 66.
- request_uri = query and "?".join([path, query]) or path
- defrag_uri = scheme + "://" + authority + request_uri
- return scheme, authority, request_uri, defrag_uri
-
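Concretely:

```python
parse_uri("http://example.com/a/b?x=1#frag")
# -> ('http', 'example.com', '/a/b', 'x=1', 'frag')

urlnorm("HTTP://Example.COM")
# -> ('http', 'example.com', '/', 'http://example.com/')
```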
-
-# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
-re_url_scheme = re.compile(r'^\w+://')
-re_slash = re.compile(r'[?/:|]+')
-
-def safename(filename):
- """Return a filename suitable for the cache.
-
- Strips dangerous and common characters to create a filename we
- can use to store the cache in.
- """
-
- try:
- if re_url_scheme.match(filename):
- if isinstance(filename,str):
- filename = filename.decode('utf-8')
- filename = filename.encode('idna')
- else:
- filename = filename.encode('idna')
- except UnicodeError:
- pass
- if isinstance(filename,unicode):
- filename=filename.encode('utf-8')
- filemd5 = _md5(filename).hexdigest()
- filename = re_url_scheme.sub("", filename)
- filename = re_slash.sub(",", filename)
-
- # limit length of filename
- if len(filename)>200:
- filename=filename[:200]
- return ",".join((filename, filemd5))
-
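-# For example (illustrative): safename("http://example.com/page?foo=bar")
-# returns "example.com,page,foo=bar" plus a comma and the md5 hex digest of
-# the full name -- the scheme is stripped, unsafe characters become commas,
-# and the digest keeps distinct URIs from colliding after truncation.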
-NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
-def _normalize_headers(headers):
-    return dict([(key.lower(), NORMALIZE_SPACE.sub(' ', value).strip()) for (key, value) in headers.iteritems()])
-
-def _parse_cache_control(headers):
- retval = {}
- if headers.has_key('cache-control'):
- parts = headers['cache-control'].split(',')
- parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
- parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
- retval = dict(parts_with_args + parts_wo_args)
- return retval
-
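-# For example (illustrative): {'cache-control': 'max-age=3600, no-cache'}
-# parses to {'max-age': '3600', 'no-cache': 1} -- valued directives keep
-# their string value, bare directives map to 1.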
-# Whether to use a strict mode to parse WWW-Authenticate headers
-# Might lead to bad results in case of ill-formed header values,
-# so disabled by default, falling back to relaxed parsing.
-# Set to true to turn on, useful for testing servers.
-USE_WWW_AUTH_STRICT_PARSING = 0
-
-# In regex below:
-# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
-# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
-# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
-# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?@,;:\\\"/[\]?={} \t]+(?!\"))\"?
-WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
-WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(? current_age:
- retval = "FRESH"
- return retval
-
-def _decompressContent(response, new_content):
- content = new_content
- try:
- encoding = response.get('content-encoding', None)
- if encoding in ['gzip', 'deflate']:
- if encoding == 'gzip':
- content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
- if encoding == 'deflate':
- content = zlib.decompress(content)
- response['content-length'] = str(len(content))
-            # Record the historical presence of the encoding in a way that won't interfere.
- response['-content-encoding'] = response['content-encoding']
- del response['content-encoding']
- except IOError:
- content = ""
- raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
- return content
-
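-# For example (illustrative): given resp = {'content-encoding': 'gzip'} and a
-# body that is the gzipped form of "hello", _decompressContent(resp, body)
-# returns "hello" and leaves resp as {'-content-encoding': 'gzip',
-# 'content-length': '5'}.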
-def _updateCache(request_headers, response_headers, content, cache, cachekey):
- if cachekey:
- cc = _parse_cache_control(request_headers)
- cc_response = _parse_cache_control(response_headers)
- if cc.has_key('no-store') or cc_response.has_key('no-store'):
- cache.delete(cachekey)
- else:
- info = email.Message.Message()
- for key, value in response_headers.iteritems():
- if key not in ['status','content-encoding','transfer-encoding']:
- info[key] = value
-
- # Add annotations to the cache to indicate what headers
- # are variant for this request.
- vary = response_headers.get('vary', None)
- if vary:
- vary_headers = vary.lower().replace(' ', '').split(',')
- for header in vary_headers:
- key = '-varied-%s' % header
- try:
- info[key] = request_headers[header]
- except KeyError:
- pass
-
- status = response_headers.status
- if status == 304:
- status = 200
-
- status_header = 'status: %d\r\n' % status
-
- header_str = info.as_string()
-
-            header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
-            text = "".join([status_header, header_str, content])
-
-            cache.set(cachekey, text)
-
-def _cnonce():
-    dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
-    return dig[:16]
-
-def _wsse_username_token(cnonce, iso_now, password):
-    return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
-
-class GoogleLoginAuthentication(Authentication):
-    def __init__(self, credentials, host, request_uri, headers, response, content, http):
-        from urllib import urlencode
-        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
-        challenge = _parse_www_authenticate(response, 'www-authenticate')
-        service = challenge['googlelogin'].get('service', 'xapi')
-        # Blogger actually returns the service in the challenge
-        # For the rest we guess based on the URI
-        if service == 'xapi' and request_uri.find("calendar") > 0:
-            service = "cl"
- # No point in guessing Base or Spreadsheet
- #elif request_uri.find("spreadsheets") > 0:
- # service = "wise"
-
- auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
- resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
- lines = content.split('\n')
- d = dict([tuple(line.split("=", 1)) for line in lines if line])
- if resp.status == 403:
- self.Auth = ""
- else:
- self.Auth = d['Auth']
-
- def request(self, method, request_uri, headers, content):
- """Modify the request headers to add the appropriate
- Authorization header."""
- headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
-
-
-AUTH_SCHEME_CLASSES = {
- "basic": BasicAuthentication,
- "wsse": WsseAuthentication,
- "digest": DigestAuthentication,
- "hmacdigest": HmacDigestAuthentication,
- "googlelogin": GoogleLoginAuthentication
-}
-
-AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
-
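-# For example (illustrative): a 401 whose WWW-Authenticate header offers both
-# Basic and Digest yields a DigestAuthentication object first, since
-# _auth_from_challenge walks AUTH_SCHEME_ORDER (roughly strongest first).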
-class FileCache(object):
- """Uses a local directory as a store for cached files.
- Not really safe to use if multiple threads or processes are going to
- be running on the same cache.
- """
- def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
- self.cache = cache
- self.safe = safe
- if not os.path.exists(cache):
- os.makedirs(self.cache)
-
- def get(self, key):
- retval = None
- cacheFullPath = os.path.join(self.cache, self.safe(key))
- try:
- f = file(cacheFullPath, "rb")
- retval = f.read()
- f.close()
- except IOError:
- pass
- return retval
-
- def set(self, key, value):
- cacheFullPath = os.path.join(self.cache, self.safe(key))
- f = file(cacheFullPath, "wb")
- f.write(value)
- f.close()
-
- def delete(self, key):
- cacheFullPath = os.path.join(self.cache, self.safe(key))
- if os.path.exists(cacheFullPath):
- os.remove(cacheFullPath)
-
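-# Minimal usage sketch (values illustrative):
-#   c = FileCache(".cache")
-#   c.set("http://example.com", "cached bytes")   # stored under safename(key)
-#   assert c.get("http://example.com") == "cached bytes"
-#   c.delete("http://example.com")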
-class Credentials(object):
- def __init__(self):
- self.credentials = []
-
- def add(self, name, password, domain=""):
- self.credentials.append((domain.lower(), name, password))
-
- def clear(self):
- self.credentials = []
-
- def iter(self, domain):
- for (cdomain, name, password) in self.credentials:
- if cdomain == "" or domain == cdomain:
- yield (name, password)
-
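-# For example (illustrative): after add("joe", "s3cret") with the default
-# empty domain, iter("example.com") yields ("joe", "s3cret") for any host,
-# while add("ann", "pw", "example.com") matches only that domain.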
-class KeyCerts(Credentials):
- """Identical to Credentials except that
- name/password are mapped to key/cert."""
- pass
-
-class AllHosts(object):
- pass
-
-class ProxyInfo(object):
- """Collect information required to use a proxy."""
- bypass_hosts = ()
-
- def __init__(self, proxy_type, proxy_host, proxy_port,
- proxy_rdns=None, proxy_user=None, proxy_pass=None):
-        """The parameter proxy_type must be set to one of the socks.PROXY_TYPE_XXX
- constants. For example:
-
- p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP,
- proxy_host='localhost', proxy_port=8000)
- """
- self.proxy_type = proxy_type
- self.proxy_host = proxy_host
- self.proxy_port = proxy_port
- self.proxy_rdns = proxy_rdns
- self.proxy_user = proxy_user
- self.proxy_pass = proxy_pass
-
- def astuple(self):
- return (self.proxy_type, self.proxy_host, self.proxy_port,
- self.proxy_rdns, self.proxy_user, self.proxy_pass)
-
- def isgood(self):
- return (self.proxy_host != None) and (self.proxy_port != None)
-
- def applies_to(self, hostname):
- return not self.bypass_host(hostname)
-
- def bypass_host(self, hostname):
-        """Has this host been excluded from the proxy config?"""
- if self.bypass_hosts is AllHosts:
- return True
-
- bypass = False
- for domain in self.bypass_hosts:
- if hostname.endswith(domain):
- bypass = True
-
- return bypass
-
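-# For example (illustrative): with bypass_hosts = ['.local', 'example.com'],
-# bypass_host('foo.example.com') is True (suffix match) while
-# bypass_host('example.org') is False.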
-
-def proxy_info_from_environment(method='http'):
- """
- Read proxy info from the environment variables.
- """
- if method not in ['http', 'https']:
- return
-
- env_var = method + '_proxy'
- url = os.environ.get(env_var, os.environ.get(env_var.upper()))
- if not url:
- return
- pi = proxy_info_from_url(url, method)
-
- no_proxy = os.environ.get('no_proxy', os.environ.get('NO_PROXY', ''))
- bypass_hosts = []
- if no_proxy:
- bypass_hosts = no_proxy.split(',')
- # special case, no_proxy=* means all hosts bypassed
- if no_proxy == '*':
- bypass_hosts = AllHosts
-
- pi.bypass_hosts = bypass_hosts
- return pi
-
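-# For example (illustrative): with http_proxy="http://user:pw@proxy:3128" and
-# no_proxy="localhost,.internal" set in the environment,
-# proxy_info_from_environment() returns a ProxyInfo for host 'proxy', port
-# 3128, with user/pw credentials and bypass_hosts ['localhost', '.internal'].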
-def proxy_info_from_url(url, method='http'):
- """
- Construct a ProxyInfo from a URL (such as http_proxy env var)
- """
- url = urlparse.urlparse(url)
- username = None
- password = None
- port = None
- if '@' in url[1]:
- ident, host_port = url[1].split('@', 1)
- if ':' in ident:
- username, password = ident.split(':', 1)
- else:
- password = ident
- else:
- host_port = url[1]
- if ':' in host_port:
- host, port = host_port.split(':', 1)
- else:
- host = host_port
-
- if port:
- port = int(port)
- else:
- port = dict(https=443, http=80)[method]
-
- proxy_type = 3 # socks.PROXY_TYPE_HTTP
- return ProxyInfo(
- proxy_type = proxy_type,
- proxy_host = host,
- proxy_port = port,
- proxy_user = username or None,
- proxy_pass = password or None,
- )
-
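-# For example (illustrative): proxy_info_from_url("http://proxy") has no
-# explicit port, so proxy_port defaults from the method: 80 here, 443 for
-# method='https'.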
-
-class HTTPConnectionWithTimeout(httplib.HTTPConnection):
- """
- HTTPConnection subclass that supports timeouts
-
- All timeouts are in seconds. If None is passed for timeout then
- Python's default timeout for sockets will be used. See for example
- the docs of socket.setdefaulttimeout():
- http://docs.python.org/library/socket.html#socket.setdefaulttimeout
- """
-
- def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
- httplib.HTTPConnection.__init__(self, host, port, strict)
- self.timeout = timeout
- self.proxy_info = proxy_info
-
- def connect(self):
- """Connect to the host and port specified in __init__."""
- # Mostly verbatim from httplib.py.
- if self.proxy_info and socks is None:
- raise ProxiesUnavailableError(
- 'Proxy support missing but proxy use was requested!')
- msg = "getaddrinfo returns an empty list"
- if self.proxy_info and self.proxy_info.isgood():
- use_proxy = True
- proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
- else:
- use_proxy = False
- if use_proxy and proxy_rdns:
- host = proxy_host
- port = proxy_port
- else:
- host = self.host
- port = self.port
-
- for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
- af, socktype, proto, canonname, sa = res
- try:
- if use_proxy:
- self.sock = socks.socksocket(af, socktype, proto)
- self.sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
- else:
- self.sock = socket.socket(af, socktype, proto)
- self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
- # Different from httplib: support timeouts.
- if has_timeout(self.timeout):
- self.sock.settimeout(self.timeout)
- # End of difference from httplib.
- if self.debuglevel > 0:
- print "connect: (%s, %s) ************" % (self.host, self.port)
- if use_proxy:
- print "proxy: %s ************" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
-
- self.sock.connect((self.host, self.port) + sa[2:])
- except socket.error, msg:
- if self.debuglevel > 0:
- print "connect fail: (%s, %s)" % (self.host, self.port)
- if use_proxy:
- print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
- if self.sock:
- self.sock.close()
- self.sock = None
- continue
- break
- if not self.sock:
- raise socket.error, msg
-
-class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
- """
- This class allows communication via SSL.
-
- All timeouts are in seconds. If None is passed for timeout then
- Python's default timeout for sockets will be used. See for example
- the docs of socket.setdefaulttimeout():
- http://docs.python.org/library/socket.html#socket.setdefaulttimeout
- """
- def __init__(self, host, port=None, key_file=None, cert_file=None,
- strict=None, timeout=None, proxy_info=None,
- ca_certs=None, disable_ssl_certificate_validation=False):
- httplib.HTTPSConnection.__init__(self, host, port=port,
- key_file=key_file,
- cert_file=cert_file, strict=strict)
- self.timeout = timeout
- self.proxy_info = proxy_info
- if ca_certs is None:
- ca_certs = CA_CERTS
- self.ca_certs = ca_certs
- self.disable_ssl_certificate_validation = \
- disable_ssl_certificate_validation
-
- # The following two methods were adapted from https_wrapper.py, released
- # with the Google Appengine SDK at
- # http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
- # under the following license:
- #
- # Copyright 2007 Google Inc.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- #
-
- def _GetValidHostsForCert(self, cert):
- """Returns a list of valid host globs for an SSL certificate.
-
- Args:
- cert: A dictionary representing an SSL certificate.
- Returns:
- list: A list of valid host globs.
- """
- if 'subjectAltName' in cert:
- return [x[1] for x in cert['subjectAltName']
- if x[0].lower() == 'dns']
- else:
- return [x[0][1] for x in cert['subject']
- if x[0][0].lower() == 'commonname']
-
- def _ValidateCertificateHostname(self, cert, hostname):
- """Validates that a given hostname is valid for an SSL certificate.
-
- Args:
- cert: A dictionary representing an SSL certificate.
- hostname: The hostname to test.
- Returns:
- bool: Whether or not the hostname is valid for this certificate.
- """
- hosts = self._GetValidHostsForCert(cert)
- for host in hosts:
- host_re = host.replace('.', '\.').replace('*', '[^.]*')
- if re.search('^%s$' % (host_re,), hostname, re.I):
- return True
- return False
-
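-    # For example (illustrative): a certificate glob "*.example.com" becomes
-    # the regex "[^.]*\.example\.com", so "foo.example.com" validates while
-    # "a.b.example.com" does not -- the '*' never crosses a dot.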
- def connect(self):
- "Connect to a host on a given (SSL) port."
-
- msg = "getaddrinfo returns an empty list"
- if self.proxy_info and self.proxy_info.isgood():
- use_proxy = True
- proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
- else:
- use_proxy = False
- if use_proxy and proxy_rdns:
- host = proxy_host
- port = proxy_port
- else:
- host = self.host
- port = self.port
-
- address_info = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
- for family, socktype, proto, canonname, sockaddr in address_info:
- try:
- if use_proxy:
- sock = socks.socksocket(family, socktype, proto)
-
- sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
- else:
- sock = socket.socket(family, socktype, proto)
- sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-
- if has_timeout(self.timeout):
- sock.settimeout(self.timeout)
- sock.connect((self.host, self.port))
-                self.sock = _ssl_wrap_socket(
- sock, self.key_file, self.cert_file,
- self.disable_ssl_certificate_validation, self.ca_certs)
- if self.debuglevel > 0:
- print "connect: (%s, %s)" % (self.host, self.port)
- if use_proxy:
- print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
- if not self.disable_ssl_certificate_validation:
- cert = self.sock.getpeercert()
-                    hostname = self.host.split(':')[0]
- if not self._ValidateCertificateHostname(cert, hostname):
- raise CertificateHostnameMismatch(
- 'Server presented certificate that does not match '
- 'host %s: %s' % (hostname, cert), hostname, cert)
- except ssl_SSLError, e:
- if sock:
- sock.close()
- if self.sock:
- self.sock.close()
- self.sock = None
- # Unfortunately the ssl module doesn't seem to provide any way
- # to get at more detailed error information, in particular
- # whether the error is due to certificate validation or
- # something else (such as SSL protocol mismatch).
- if e.errno == ssl.SSL_ERROR_SSL:
- raise SSLHandshakeError(e)
- else:
- raise
- except (socket.timeout, socket.gaierror):
- raise
- except socket.error, msg:
- if self.debuglevel > 0:
- print "connect fail: (%s, %s)" % (self.host, self.port)
- if use_proxy:
- print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
- if self.sock:
- self.sock.close()
- self.sock = None
- continue
- break
- if not self.sock:
- raise socket.error, msg
-
-SCHEME_TO_CONNECTION = {
- 'http': HTTPConnectionWithTimeout,
- 'https': HTTPSConnectionWithTimeout
-}
-
-# Use a different connection object for Google App Engine
-try:
- try:
- from google.appengine.api import apiproxy_stub_map
- if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
- raise ImportError # Bail out; we're not actually running on App Engine.
- from google.appengine.api.urlfetch import fetch
- from google.appengine.api.urlfetch import InvalidURLError
- except (ImportError, AttributeError):
- from google3.apphosting.api import apiproxy_stub_map
- if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
- raise ImportError # Bail out; we're not actually running on App Engine.
- from google3.apphosting.api.urlfetch import fetch
- from google3.apphosting.api.urlfetch import InvalidURLError
-
- def _new_fixed_fetch(validate_certificate):
- def fixed_fetch(url, payload=None, method="GET", headers={},
- allow_truncated=False, follow_redirects=True,
- deadline=5):
- return fetch(url, payload=payload, method=method, headers=headers,
- allow_truncated=allow_truncated,
- follow_redirects=follow_redirects, deadline=deadline,
- validate_certificate=validate_certificate)
- return fixed_fetch
-
- class AppEngineHttpConnection(httplib.HTTPConnection):
- """Use httplib on App Engine, but compensate for its weirdness.
-
- The parameters key_file, cert_file, proxy_info, ca_certs, and
- disable_ssl_certificate_validation are all dropped on the ground.
- """
- def __init__(self, host, port=None, key_file=None, cert_file=None,
- strict=None, timeout=None, proxy_info=None, ca_certs=None,
- disable_ssl_certificate_validation=False):
- httplib.HTTPConnection.__init__(self, host, port=port,
- strict=strict, timeout=timeout)
-
- class AppEngineHttpsConnection(httplib.HTTPSConnection):
- """Same as AppEngineHttpConnection, but for HTTPS URIs."""
- def __init__(self, host, port=None, key_file=None, cert_file=None,
- strict=None, timeout=None, proxy_info=None, ca_certs=None,
- disable_ssl_certificate_validation=False):
- httplib.HTTPSConnection.__init__(self, host, port=port,
- key_file=key_file,
- cert_file=cert_file, strict=strict,
- timeout=timeout)
- self._fetch = _new_fixed_fetch(
- not disable_ssl_certificate_validation)
-
-    # Update the connection classes to use the Google App Engine specific ones.
- SCHEME_TO_CONNECTION = {
- 'http': AppEngineHttpConnection,
- 'https': AppEngineHttpsConnection
- }
-except (ImportError, AttributeError):
- pass
-
-
-class Http(object):
- """An HTTP client that handles:
-
- - all methods
- - caching
- - ETags
-    - compression
- - HTTPS
- - Basic
- - Digest
- - WSSE
-
- and more.
- """
- def __init__(self, cache=None, timeout=None,
- proxy_info=proxy_info_from_environment,
- ca_certs=None, disable_ssl_certificate_validation=False):
- """If 'cache' is a string then it is used as a directory name for
- a disk cache. Otherwise it must be an object that supports the
- same interface as FileCache.
-
- All timeouts are in seconds. If None is passed for timeout
- then Python's default timeout for sockets will be used. See
- for example the docs of socket.setdefaulttimeout():
- http://docs.python.org/library/socket.html#socket.setdefaulttimeout
-
- `proxy_info` may be:
- - a callable that takes the http scheme ('http' or 'https') and
- returns a ProxyInfo instance per request. By default, uses
-            proxy_info_from_environment.
- - a ProxyInfo instance (static proxy config).
- - None (proxy disabled).
-
- ca_certs is the path of a file containing root CA certificates for SSL
- server certificate validation. By default, a CA cert file bundled with
- httplib2 is used.
-
- If disable_ssl_certificate_validation is true, SSL cert validation will
- not be performed.
- """
- self.proxy_info = proxy_info
- self.ca_certs = ca_certs
- self.disable_ssl_certificate_validation = \
- disable_ssl_certificate_validation
-
- # Map domain name to an httplib connection
- self.connections = {}
- # The location of the cache, for now a directory
- # where cached responses are held.
- if cache and isinstance(cache, basestring):
- self.cache = FileCache(cache)
- else:
- self.cache = cache
-
- # Name/password
- self.credentials = Credentials()
-
- # Key/cert
- self.certificates = KeyCerts()
-
- # authorization objects
- self.authorizations = []
-
- # If set to False then no redirects are followed, even safe ones.
- self.follow_redirects = True
-
- # Which HTTP methods do we apply optimistic concurrency to, i.e.
- # which methods get an "if-match:" etag header added to them.
- self.optimistic_concurrency_methods = ["PUT", "PATCH"]
-
- # If 'follow_redirects' is True, and this is set to True then
-        # all redirects are followed, including unsafe ones.
- self.follow_all_redirects = False
-
- self.ignore_etag = False
-
- self.force_exception_to_status_code = False
-
- self.timeout = timeout
-
- # Keep Authorization: headers on a redirect.
- self.forward_authorization_headers = False
-
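-    # Minimal usage sketch (URL illustrative):
-    #   h = Http(".cache")
-    #   h.add_credentials("name", "password")
-    #   resp, content = h.request("http://example.org/", "GET")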
- def __getstate__(self):
- state_dict = copy.copy(self.__dict__)
- # In case request is augmented by some foreign object such as
- # credentials which handle auth
- if 'request' in state_dict:
- del state_dict['request']
- if 'connections' in state_dict:
- del state_dict['connections']
- return state_dict
-
- def __setstate__(self, state):
- self.__dict__.update(state)
- self.connections = {}
-
- def _auth_from_challenge(self, host, request_uri, headers, response, content):
- """A generator that creates Authorization objects
- that can be applied to requests.
- """
- challenges = _parse_www_authenticate(response, 'www-authenticate')
- for cred in self.credentials.iter(host):
- for scheme in AUTH_SCHEME_ORDER:
- if challenges.has_key(scheme):
- yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
-
- def add_credentials(self, name, password, domain=""):
- """Add a name and password that will be used
- any time a request requires authentication."""
- self.credentials.add(name, password, domain)
-
- def add_certificate(self, key, cert, domain):
- """Add a key and cert that will be used
- any time a request requires authentication."""
- self.certificates.add(key, cert, domain)
-
- def clear_credentials(self):
- """Remove all the names and passwords
- that are used for authentication"""
- self.credentials.clear()
- self.authorizations = []
-
- def _conn_request(self, conn, request_uri, method, body, headers):
- for i in range(RETRIES):
- try:
- if hasattr(conn, 'sock') and conn.sock is None:
- conn.connect()
- conn.request(method, request_uri, body, headers)
- except socket.timeout:
- raise
- except socket.gaierror:
- conn.close()
- raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
- except ssl_SSLError:
- conn.close()
- raise
- except socket.error, e:
- err = 0
- if hasattr(e, 'args'):
- err = getattr(e, 'args')[0]
- else:
- err = e.errno
- if err == errno.ECONNREFUSED: # Connection refused
- raise
- except httplib.HTTPException:
- # Just because the server closed the connection doesn't apparently mean
- # that the server didn't send a response.
- if hasattr(conn, 'sock') and conn.sock is None:
- if i < RETRIES-1:
- conn.close()
- conn.connect()
- continue
- else:
- conn.close()
- raise
- if i < RETRIES-1:
- conn.close()
- conn.connect()
- continue
- try:
- response = conn.getresponse()
- except (socket.error, httplib.HTTPException):
- if i < RETRIES-1:
- conn.close()
- conn.connect()
- continue
- else:
- conn.close()
- raise
- else:
- content = ""
- if method == "HEAD":
- conn.close()
- else:
- content = response.read()
- response = Response(response)
- if method != "HEAD":
- content = _decompressContent(response, content)
- break
- return (response, content)
-
-
- def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
- """Do the actual request using the connection object
- and also follow one level of redirects if necessary"""
-
- auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
- auth = auths and sorted(auths)[0][1] or None
- if auth:
- auth.request(method, request_uri, headers, body)
-
- (response, content) = self._conn_request(conn, request_uri, method, body, headers)
-
- if auth:
- if auth.response(response, body):
- auth.request(method, request_uri, headers, body)
- (response, content) = self._conn_request(conn, request_uri, method, body, headers )
- response._stale_digest = 1
-
- if response.status == 401:
- for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
- authorization.request(method, request_uri, headers, body)
- (response, content) = self._conn_request(conn, request_uri, method, body, headers, )
- if response.status != 401:
- self.authorizations.append(authorization)
- authorization.response(response, body)
- break
-
- if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
- if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
- # Pick out the location header and basically start from the beginning
- # remembering first to strip the ETag header and decrement our 'depth'
- if redirections:
- if not response.has_key('location') and response.status != 300:
- raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
- # Fix-up relative redirects (which violate an RFC 2616 MUST)
- if response.has_key('location'):
- location = response['location']
- (scheme, authority, path, query, fragment) = parse_uri(location)
- if authority == None:
- response['location'] = urlparse.urljoin(absolute_uri, location)
- if response.status == 301 and method in ["GET", "HEAD"]:
- response['-x-permanent-redirect-url'] = response['location']
- if not response.has_key('content-location'):
- response['content-location'] = absolute_uri
- _updateCache(headers, response, content, self.cache, cachekey)
- if headers.has_key('if-none-match'):
- del headers['if-none-match']
- if headers.has_key('if-modified-since'):
- del headers['if-modified-since']
- if 'authorization' in headers and not self.forward_authorization_headers:
- del headers['authorization']
- if response.has_key('location'):
- location = response['location']
- old_response = copy.deepcopy(response)
- if not old_response.has_key('content-location'):
- old_response['content-location'] = absolute_uri
- redirect_method = method
- if response.status in [302, 303]:
- redirect_method = "GET"
- body = None
- (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
- response.previous = old_response
- else:
-                    raise RedirectLimit("Redirected more times than redirection_limit allows.", response, content)
- elif response.status in [200, 203] and method in ["GET", "HEAD"]:
- # Don't cache 206's since we aren't going to handle byte range requests
- if not response.has_key('content-location'):
- response['content-location'] = absolute_uri
- _updateCache(headers, response, content, self.cache, cachekey)
-
- return (response, content)
-
- def _normalize_headers(self, headers):
- return _normalize_headers(headers)
-
-# Need to catch and rebrand some exceptions
-# Then need to optionally turn all exceptions into status codes
-# including all socket.* and httplib.* exceptions.
-
-
- def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
- """ Performs a single HTTP request.
-
- The 'uri' is the URI of the HTTP resource and can begin with either
- 'http' or 'https'. The value of 'uri' must be an absolute URI.
-
- The 'method' is the HTTP method to perform, such as GET, POST, DELETE,
- etc. There is no restriction on the methods allowed.
-
- The 'body' is the entity body to be sent with the request. It is a
- string object.
-
- Any extra headers that are to be sent with the request should be
- provided in the 'headers' dictionary.
-
-        The maximum number of redirects to follow before raising an
-        exception is 'redirections'. The default is 5.
-
- The return value is a tuple of (response, content), the first
-        being an instance of the 'Response' class, the second being
- a string that contains the response entity body.
- """
- try:
- if headers is None:
- headers = {}
- else:
- headers = self._normalize_headers(headers)
-
- if not headers.has_key('user-agent'):
- headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__
-
- uri = iri2uri(uri)
-
- (scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
- domain_port = authority.split(":")[0:2]
- if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
- scheme = 'https'
- authority = domain_port[0]
-
- proxy_info = self._get_proxy_info(scheme, authority)
-
- conn_key = scheme+":"+authority
- if conn_key in self.connections:
- conn = self.connections[conn_key]
- else:
- if not connection_type:
- connection_type = SCHEME_TO_CONNECTION[scheme]
- certs = list(self.certificates.iter(authority))
- if scheme == 'https':
- if certs:
- conn = self.connections[conn_key] = connection_type(
- authority, key_file=certs[0][0],
- cert_file=certs[0][1], timeout=self.timeout,
- proxy_info=proxy_info,
- ca_certs=self.ca_certs,
- disable_ssl_certificate_validation=
- self.disable_ssl_certificate_validation)
- else:
- conn = self.connections[conn_key] = connection_type(
- authority, timeout=self.timeout,
- proxy_info=proxy_info,
- ca_certs=self.ca_certs,
- disable_ssl_certificate_validation=
- self.disable_ssl_certificate_validation)
- else:
- conn = self.connections[conn_key] = connection_type(
- authority, timeout=self.timeout,
- proxy_info=proxy_info)
- conn.set_debuglevel(debuglevel)
-
- if 'range' not in headers and 'accept-encoding' not in headers:
- headers['accept-encoding'] = 'gzip, deflate'
-
- info = email.Message.Message()
- cached_value = None
- if self.cache:
- cachekey = defrag_uri
- cached_value = self.cache.get(cachekey)
- if cached_value:
- # info = email.message_from_string(cached_value)
- #
- # Need to replace the line above with the kludge below
- # to fix the non-existent bug not fixed in this
- # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
- try:
- info, content = cached_value.split('\r\n\r\n', 1)
- feedparser = email.FeedParser.FeedParser()
- feedparser.feed(info)
- info = feedparser.close()
- feedparser._parse = None
- except (IndexError, ValueError):
- self.cache.delete(cachekey)
- cachekey = None
- cached_value = None
- else:
- cachekey = None
-
- if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
- # http://www.w3.org/1999/04/Editing/
- headers['if-match'] = info['etag']
-
- if method not in ["GET", "HEAD"] and self.cache and cachekey:
- # RFC 2616 Section 13.10
- self.cache.delete(cachekey)
-
- # Check the vary header in the cache to see if this request
- # matches what varies in the cache.
- if method in ['GET', 'HEAD'] and 'vary' in info:
- vary = info['vary']
- vary_headers = vary.lower().replace(' ', '').split(',')
- for header in vary_headers:
- key = '-varied-%s' % header
- value = info[key]
- if headers.get(header, None) != value:
- cached_value = None
- break
-
- if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
- if info.has_key('-x-permanent-redirect-url'):
- # Should cached permanent redirects be counted in our redirection count? For now, yes.
- if redirections <= 0:
-                        raise RedirectLimit("Redirected more times than redirection_limit allows.", {}, "")
- (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
- response.previous = Response(info)
- response.previous.fromcache = True
- else:
- # Determine our course of action:
- # Is the cached entry fresh or stale?
- # Has the client requested a non-cached response?
- #
- # There seems to be three possible answers:
- # 1. [FRESH] Return the cache entry w/o doing a GET
- # 2. [STALE] Do the GET (but add in cache validators if available)
- # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
- entry_disposition = _entry_disposition(info, headers)
-
- if entry_disposition == "FRESH":
- if not cached_value:
- info['status'] = '504'
- content = ""
- response = Response(info)
- if cached_value:
- response.fromcache = True
- return (response, content)
-
- if entry_disposition == "STALE":
- if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
- headers['if-none-match'] = info['etag']
- if info.has_key('last-modified') and not 'last-modified' in headers:
- headers['if-modified-since'] = info['last-modified']
- elif entry_disposition == "TRANSPARENT":
- pass
-
- (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
-
- if response.status == 304 and method == "GET":
- # Rewrite the cache entry with the new end-to-end headers
- # Take all headers that are in response
- # and overwrite their values in info.
- # unless they are hop-by-hop, or are listed in the connection header.
-
- for key in _get_end2end_headers(response):
- info[key] = response[key]
- merged_response = Response(info)
- if hasattr(response, "_stale_digest"):
- merged_response._stale_digest = response._stale_digest
- _updateCache(headers, merged_response, content, self.cache, cachekey)
- response = merged_response
- response.status = 200
- response.fromcache = True
-
- elif response.status == 200:
- content = new_content
- else:
- self.cache.delete(cachekey)
- content = new_content
- else:
- cc = _parse_cache_control(headers)
- if cc.has_key('only-if-cached'):
- info['status'] = '504'
- response = Response(info)
- content = ""
- else:
- (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
- except Exception, e:
- if self.force_exception_to_status_code:
- if isinstance(e, HttpLib2ErrorWithResponse):
- response = e.response
- content = e.content
- response.status = 500
- response.reason = str(e)
- elif isinstance(e, socket.timeout):
- content = "Request Timeout"
- response = Response({
- "content-type": "text/plain",
- "status": "408",
- "content-length": len(content)
- })
- response.reason = "Request Timeout"
- else:
- content = str(e)
- response = Response({
- "content-type": "text/plain",
- "status": "400",
- "content-length": len(content)
- })
- response.reason = "Bad Request"
- else:
- raise
-
-
- return (response, content)
-
- def _get_proxy_info(self, scheme, authority):
- """Return a ProxyInfo instance (or None) based on the scheme
- and authority.
- """
- hostname, port = urllib.splitport(authority)
- proxy_info = self.proxy_info
- if callable(proxy_info):
- proxy_info = proxy_info(scheme)
-
- if (hasattr(proxy_info, 'applies_to')
- and not proxy_info.applies_to(hostname)):
- proxy_info = None
- return proxy_info
-
-
-class Response(dict):
- """An object more like email.Message than httplib.HTTPResponse."""
-
- """Is this response from our local cache"""
- fromcache = False
-
- """HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
- version = 11
-
-    """Status code returned by server."""
- status = 200
-
- """Reason phrase returned by server."""
- reason = "Ok"
-
- previous = None
-
- def __init__(self, info):
- # info is either an email.Message or
- # an httplib.HTTPResponse object.
- if isinstance(info, httplib.HTTPResponse):
- for key, value in info.getheaders():
- self[key.lower()] = value
- self.status = info.status
- self['status'] = str(self.status)
- self.reason = info.reason
- self.version = info.version
- elif isinstance(info, email.Message.Message):
- for key, value in info.items():
- self[key.lower()] = value
- self.status = int(self['status'])
- else:
- for key, value in info.iteritems():
- self[key.lower()] = value
- self.status = int(self.get('status', self.status))
- self.reason = self.get('reason', self.reason)
-
-
- def __getattr__(self, name):
- if name == 'dict':
- return self
- else:
- raise AttributeError, name
diff --git a/python-packages/httplib2/cacerts.txt b/python-packages/httplib2/cacerts.txt
deleted file mode 100644
index d8a0027cc7..0000000000
--- a/python-packages/httplib2/cacerts.txt
+++ /dev/null
@@ -1,739 +0,0 @@
-# Certificate Authority certificates for validating SSL connections.
-#
-# This file contains PEM format certificates generated from
-# http://mxr.mozilla.org/seamonkey/source/security/nss/lib/ckfw/builtins/certdata.txt
-#
-# ***** BEGIN LICENSE BLOCK *****
-# Version: MPL 1.1/GPL 2.0/LGPL 2.1
-#
-# The contents of this file are subject to the Mozilla Public License Version
-# 1.1 (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-# http://www.mozilla.org/MPL/
-#
-# Software distributed under the License is distributed on an "AS IS" basis,
-# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
-# for the specific language governing rights and limitations under the
-# License.
-#
-# The Original Code is the Netscape security libraries.
-#
-# The Initial Developer of the Original Code is
-# Netscape Communications Corporation.
-# Portions created by the Initial Developer are Copyright (C) 1994-2000
-# the Initial Developer. All Rights Reserved.
-#
-# Contributor(s):
-#
-# Alternatively, the contents of this file may be used under the terms of
-# either the GNU General Public License Version 2 or later (the "GPL"), or
-# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
-# in which case the provisions of the GPL or the LGPL are applicable instead
-# of those above. If you wish to allow use of your version of this file only
-# under the terms of either the GPL or the LGPL, and not to allow others to
-# use your version of this file under the terms of the MPL, indicate your
-# decision by deleting the provisions above and replace them with the notice
-# and other provisions required by the GPL or the LGPL. If you do not delete
-# the provisions above, a recipient may use your version of this file under
-# the terms of any one of the MPL, the GPL or the LGPL.
-#
-# ***** END LICENSE BLOCK *****
-
-Verisign/RSA Secure Server CA
-=============================
-
------BEGIN CERTIFICATE-----
-MIICNDCCAaECEAKtZn5ORf5eV288mBle3cAwDQYJKoZIhvcNAQECBQAwXzELMAkG
-A1UEBhMCVVMxIDAeBgNVBAoTF1JTQSBEYXRhIFNlY3VyaXR5LCBJbmMuMS4wLAYD
-VQQLEyVTZWN1cmUgU2VydmVyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk0
-MTEwOTAwMDAwMFoXDTEwMDEwNzIzNTk1OVowXzELMAkGA1UEBhMCVVMxIDAeBgNV
-BAoTF1JTQSBEYXRhIFNlY3VyaXR5LCBJbmMuMS4wLAYDVQQLEyVTZWN1cmUgU2Vy
-dmVyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGbMA0GCSqGSIb3DQEBAQUAA4GJ
-ADCBhQJ+AJLOesGugz5aqomDV6wlAXYMra6OLDfO6zV4ZFQD5YRAUcm/jwjiioII
-0haGN1XpsSECrXZogZoFokvJSyVmIlZsiAeP94FZbYQHZXATcXY+m3dM41CJVphI
-uR2nKRoTLkoRWZweFdVJVCxzOmmCsZc5nG1wZ0jl3S3WyB57AgMBAAEwDQYJKoZI
-hvcNAQECBQADfgBl3X7hsuyw4jrg7HFGmhkRuNPHoLQDQCYCPgmc4RKz0Vr2N6W3
-YQO2WxZpO8ZECAyIUwxrl0nHPjXcbLm7qt9cuzovk2C2qUtN8iD3zV9/ZHuO3ABc
-1/p3yjkWWW8O6tO1g39NTUJWdrTJXwT4OPjr0l91X817/OWOgHz8UA==
------END CERTIFICATE-----
-
-Thawte Personal Basic CA
-========================
-
------BEGIN CERTIFICATE-----
-MIIDITCCAoqgAwIBAgIBADANBgkqhkiG9w0BAQQFADCByzELMAkGA1UEBhMCWkEx
-FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMRowGAYD
-VQQKExFUaGF3dGUgQ29uc3VsdGluZzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBT
-ZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UEAxMYVGhhd3RlIFBlcnNvbmFsIEJhc2lj
-IENBMSgwJgYJKoZIhvcNAQkBFhlwZXJzb25hbC1iYXNpY0B0aGF3dGUuY29tMB4X
-DTk2MDEwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgcsxCzAJBgNVBAYTAlpBMRUw
-EwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEaMBgGA1UE
-ChMRVGhhd3RlIENvbnN1bHRpbmcxKDAmBgNVBAsTH0NlcnRpZmljYXRpb24gU2Vy
-dmljZXMgRGl2aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQZXJzb25hbCBCYXNpYyBD
-QTEoMCYGCSqGSIb3DQEJARYZcGVyc29uYWwtYmFzaWNAdGhhd3RlLmNvbTCBnzAN
-BgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAvLyTU23AUE+CFeZIlDWmWr5vQvoPR+53
-dXLdjUmbllegeNTKP1GzaQuRdhciB5dqxFGTS+CN7zeVoQxN2jSQHReJl+A1OFdK
-wPQIcOk8RHtQfmGakOMj04gRRif1CwcOu93RfyAKiLlWCy4cgNrx454p7xS9CkT7
-G1sY0b8jkyECAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQQF
-AAOBgQAt4plrsD16iddZopQBHyvdEktTwq1/qqcAXJFAVyVKOKqEcLnZgA+le1z7
-c8a914phXAPjLSeoF+CEhULcXpvGt7Jtu3Sv5D/Lp7ew4F2+eIMllNLbgQ95B21P
-9DkVWlIBe94y1k049hJcBlDfBVu9FEuh3ym6O0GN92NWod8isQ==
------END CERTIFICATE-----
-
-Thawte Personal Premium CA
-==========================
-
------BEGIN CERTIFICATE-----
-MIIDKTCCApKgAwIBAgIBADANBgkqhkiG9w0BAQQFADCBzzELMAkGA1UEBhMCWkEx
-FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMRowGAYD
-VQQKExFUaGF3dGUgQ29uc3VsdGluZzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBT
-ZXJ2aWNlcyBEaXZpc2lvbjEjMCEGA1UEAxMaVGhhd3RlIFBlcnNvbmFsIFByZW1p
-dW0gQ0ExKjAoBgkqhkiG9w0BCQEWG3BlcnNvbmFsLXByZW1pdW1AdGhhd3RlLmNv
-bTAeFw05NjAxMDEwMDAwMDBaFw0yMDEyMzEyMzU5NTlaMIHPMQswCQYDVQQGEwJa
-QTEVMBMGA1UECBMMV2VzdGVybiBDYXBlMRIwEAYDVQQHEwlDYXBlIFRvd24xGjAY
-BgNVBAoTEVRoYXd0ZSBDb25zdWx0aW5nMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9u
-IFNlcnZpY2VzIERpdmlzaW9uMSMwIQYDVQQDExpUaGF3dGUgUGVyc29uYWwgUHJl
-bWl1bSBDQTEqMCgGCSqGSIb3DQEJARYbcGVyc29uYWwtcHJlbWl1bUB0aGF3dGUu
-Y29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDJZtn4B0TPuYwu8KHvE0Vs
-Bd/eJxZRNkERbGw77f4QfRKe5ZtCmv5gMcNmt3M6SK5O0DI3lIi1DbbZ8/JE2dWI
-Et12TfIa/G8jHnrx2JhFTgcQ7xZC0EN1bUre4qrJMf8fAHB8Zs8QJQi6+u4A6UYD
-ZicRFTuqW/KY3TZCstqIdQIDAQABoxMwETAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
-SIb3DQEBBAUAA4GBAGk2ifc0KjNyL2071CKyuG+axTZmDhs8obF1Wub9NdP4qPIH
-b4Vnjt4rueIXsDqg8A6iAJrf8xQVbrvIhVqYgPn/vnQdPfP+MCXRNzRn+qVxeTBh
-KXLA4CxM+1bkOqhv5TJZUtt1KFBZDPgLGeSs2a+WjS9Q2wfD6h+rM+D1KzGJ
------END CERTIFICATE-----
-
-Thawte Personal Freemail CA
-===========================
-
------BEGIN CERTIFICATE-----
-MIIDLTCCApagAwIBAgIBADANBgkqhkiG9w0BAQQFADCB0TELMAkGA1UEBhMCWkEx
-FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMRowGAYD
-VQQKExFUaGF3dGUgQ29uc3VsdGluZzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBT
-ZXJ2aWNlcyBEaXZpc2lvbjEkMCIGA1UEAxMbVGhhd3RlIFBlcnNvbmFsIEZyZWVt
-YWlsIENBMSswKQYJKoZIhvcNAQkBFhxwZXJzb25hbC1mcmVlbWFpbEB0aGF3dGUu
-Y29tMB4XDTk2MDEwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgdExCzAJBgNVBAYT
-AlpBMRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEa
-MBgGA1UEChMRVGhhd3RlIENvbnN1bHRpbmcxKDAmBgNVBAsTH0NlcnRpZmljYXRp
-b24gU2VydmljZXMgRGl2aXNpb24xJDAiBgNVBAMTG1RoYXd0ZSBQZXJzb25hbCBG
-cmVlbWFpbCBDQTErMCkGCSqGSIb3DQEJARYccGVyc29uYWwtZnJlZW1haWxAdGhh
-d3RlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA1GnX1LCUZFtx6UfY
-DFG26nKRsIRefS0Nj3sS34UldSh0OkIsYyeflXtL734Zhx2G6qPduc6WZBrCFG5E
-rHzmj+hND3EfQDimAKOHePb5lIZererAXnbr2RSjXW56fAylS1V/Bhkpf56aJtVq
-uzgkCGqYx7Hao5iR/Xnb5VrEHLkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zAN
-BgkqhkiG9w0BAQQFAAOBgQDH7JJ+Tvj1lqVnYiqk8E0RYNBvjWBYYawmu1I1XAjP
-MPuoSpaKH2JCI4wXD/S6ZJwXrEcp352YXtJsYHFcoqzceePnbgBHH7UNKOgCneSa
-/RP0ptl8sfjcXyMmCZGAc9AUG95DqYMl8uacLxXK/qarigd1iwzdUYRr5PjRznei
-gQ==
------END CERTIFICATE-----
-
-Thawte Server CA
-================
-
------BEGIN CERTIFICATE-----
-MIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkEx
-FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD
-VQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv
-biBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEm
-MCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wHhcNOTYwODAx
-MDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT
-DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3
-dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNl
-cyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3
-DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQAD
-gY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl/Kj0R1HahbUgdJSGHg91
-yekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg71CcEJRCX
-L+eQbcAoQpnXTEPew/UhbVSfXcNY4cDk2VuwuNy0e982OsK1ZiIS1ocNAgMBAAGj
-EzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEEBQADgYEAB/pMaVz7lcxG
-7oWDTSEwjsrZqG9JGubaUeNgcGyEYRGhGshIPllDfU+VPaGLtwtimHp1it2ITk6e
-QNuozDJ0uW8NxuOzRAvZim+aKZuZGCg70eNAKJpaPNW15yAbi8qkq43pUdniTCxZ
-qdq5snUb9kLy78fyGPmJvKP/iiMucEc=
------END CERTIFICATE-----
-
-Thawte Premium Server CA
-========================
-
------BEGIN CERTIFICATE-----
-MIIDJzCCApCgAwIBAgIBATANBgkqhkiG9w0BAQQFADCBzjELMAkGA1UEBhMCWkEx
-FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD
-VQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv
-biBTZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UEAxMYVGhhd3RlIFByZW1pdW0gU2Vy
-dmVyIENBMSgwJgYJKoZIhvcNAQkBFhlwcmVtaXVtLXNlcnZlckB0aGF3dGUuY29t
-MB4XDTk2MDgwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgc4xCzAJBgNVBAYTAlpB
-MRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEdMBsG
-A1UEChMUVGhhd3RlIENvbnN1bHRpbmcgY2MxKDAmBgNVBAsTH0NlcnRpZmljYXRp
-b24gU2VydmljZXMgRGl2aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQcmVtaXVtIFNl
-cnZlciBDQTEoMCYGCSqGSIb3DQEJARYZcHJlbWl1bS1zZXJ2ZXJAdGhhd3RlLmNv
-bTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA0jY2aovXwlue2oFBYo847kkE
-VdbQ7xwblRZH7xhINTpS9CtqBo87L+pW46+GjZ4X9560ZXUCTe/LCaIhUdib0GfQ
-ug2SBhRz1JPLlyoAnFxODLz6FVL88kRu2hFKbgifLy3j+ao6hnO2RlNYyIkFvYMR
-uHM/qgeN9EJN50CdHDcCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG
-9w0BAQQFAAOBgQAmSCwWwlj66BZ0DKqqX1Q/8tfJeGBeXm43YyJ3Nn6yF8Q0ufUI
-hfzJATj/Tb7yFkJD57taRvvBxhEf8UqwKEbJw8RCfbz6q1lu1bdRiBHjpIUZa4JM
-pAwSremkrj/xw0llmozFyD4lt5SZu5IycQfwhl7tUCemDaYj+bvLpgcUQg==
------END CERTIFICATE-----
-
-Equifax Secure CA
-=================
-
------BEGIN CERTIFICATE-----
-MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV
-UzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2Vy
-dGlmaWNhdGUgQXV0aG9yaXR5MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1
-MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VxdWlmYXgxLTArBgNVBAsTJEVx
-dWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCBnzANBgkqhkiG9w0B
-AQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPRfM6f
-BeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+A
-cJkVV5MW8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kC
-AwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQ
-MA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlm
-aWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTgw
-ODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvSspXXR9gj
-IBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQF
-MAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA
-A4GBAFjOKer89961zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y
-7qj/WsjTVbJmcVfewCHrPSqnI0kBBIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh
-1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee9570+sB3c4
------END CERTIFICATE-----
-
-Verisign Class 1 Public Primary Certification Authority
-=======================================================
-
------BEGIN CERTIFICATE-----
-MIICPTCCAaYCEQDNun9W8N/kvFT+IqyzcqpVMA0GCSqGSIb3DQEBAgUAMF8xCzAJ
-BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE3MDUGA1UECxMuQ2xh
-c3MgMSBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw05
-NjAxMjkwMDAwMDBaFw0yODA4MDEyMzU5NTlaMF8xCzAJBgNVBAYTAlVTMRcwFQYD
-VQQKEw5WZXJpU2lnbiwgSW5jLjE3MDUGA1UECxMuQ2xhc3MgMSBQdWJsaWMgUHJp
-bWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCBnzANBgkqhkiG9w0BAQEFAAOB
-jQAwgYkCgYEA5Rm/baNWYS2ZSHH2Z965jeu3noaACpEO+jglr0aIguVzqKCbJF0N
-H8xlbgyw0FaEGIeaBpsQoXPftFg5a27B9hXVqKg/qhIGjTGsf7A01480Z4gJzRQR
-4k5FVmkfeAKA2txHkSm7NsljXMXg1y2He6G3MrB7MLoqLzGq7qNn2tsCAwEAATAN
-BgkqhkiG9w0BAQIFAAOBgQBMP7iLxmjf7kMzDl3ppssHhE16M/+SG/Q2rdiVIjZo
-EWx8QszznC7EBz8UsA9P/5CSdvnivErpj82ggAr3xSnxgiJduLHdgSOjeyUVRjB5
-FvjqBUuUfx3CHMjjt/QQQDwTw18fU+hI5Ia0e6E1sHslurjTjqs/OJ0ANACY89Fx
-lA==
------END CERTIFICATE-----
-
-Verisign Class 2 Public Primary Certification Authority
-=======================================================
-
------BEGIN CERTIFICATE-----
-MIICPDCCAaUCEC0b/EoXjaOR6+f/9YtFvgswDQYJKoZIhvcNAQECBQAwXzELMAkG
-A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFz
-cyAyIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2
-MDEyOTAwMDAwMFoXDTI4MDgwMTIzNTk1OVowXzELMAkGA1UEBhMCVVMxFzAVBgNV
-BAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAyIFB1YmxpYyBQcmlt
-YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4GN
-ADCBiQKBgQC2WoujDWojg4BrzzmH9CETMwZMJaLtVRKXxaeAufqDwSCg+i8VDXyh
-YGt+eSz6Bg86rvYbb7HS/y8oUl+DfUvEerf4Zh+AVPy3wo5ZShRXRtGak75BkQO7
-FYCTXOvnzAhsPz6zSvz/S2wj1VCCJkQZjiPDceoZJEcEnnW/yKYAHwIDAQABMA0G
-CSqGSIb3DQEBAgUAA4GBAIobK/o5wXTXXtgZZKJYSi034DNHD6zt96rbHuSLBlxg
-J8pFUs4W7z8GZOeUaHxgMxURaa+dYo2jA1Rrpr7l7gUYYAS/QoD90KioHgE796Nc
-r6Pc5iaAIzy4RHT3Cq5Ji2F4zCS/iIqnDupzGUH9TQPwiNHleI2lKk/2lw0Xd8rY
------END CERTIFICATE-----
-
-Verisign Class 3 Public Primary Certification Authority
-=======================================================
-
------BEGIN CERTIFICATE-----
-MIICPDCCAaUCEHC65B0Q2Sk0tjjKewPMur8wDQYJKoZIhvcNAQECBQAwXzELMAkG
-A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFz
-cyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2
-MDEyOTAwMDAwMFoXDTI4MDgwMTIzNTk1OVowXzELMAkGA1UEBhMCVVMxFzAVBgNV
-BAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmlt
-YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4GN
-ADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhE
-BarsAx94f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/is
-I19wKTakyYbnsZogy1Olhec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0G
-CSqGSIb3DQEBAgUAA4GBALtMEivPLCYATxQT3ab7/AoRhIzzKBxnki98tsX63/Do
-lbwdj2wsqFHMc9ikwFPwTtYmwHYBV4GSXiHx0bH/59AhWM1pF+NEHJwZRDmJXNyc
-AA9WjQKZ7aKQRUzkuxCkPfAyAw7xzvjoyVGM5mKf5p/AfbdynMk2OmufTqj/ZA1k
------END CERTIFICATE-----
-
-Verisign Class 1 Public Primary Certification Authority - G2
-============================================================
-
------BEGIN CERTIFICATE-----
-MIIDAjCCAmsCEEzH6qqYPnHTkxD4PTqJkZIwDQYJKoZIhvcNAQEFBQAwgcExCzAJ
-BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xh
-c3MgMSBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcy
-MTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3Jp
-emVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMB4X
-DTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVTMRcw
-FQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMSBQdWJsaWMg
-UHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEo
-YykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5
-MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEB
-AQUAA4GNADCBiQKBgQCq0Lq+Fi24g9TK0g+8djHKlNgdk4xWArzZbxpvUjZudVYK
-VdPfQ4chEWWKfo+9Id5rMj8bhDSVBZ1BNeuS65bdqlk/AVNtmU/t5eIqWpDBucSm
-Fc/IReumXY6cPvBkJHalzasab7bYe1FhbqZ/h8jit+U03EGI6glAvnOSPWvndQID
-AQABMA0GCSqGSIb3DQEBBQUAA4GBAKlPww3HZ74sy9mozS11534Vnjty637rXC0J
-h9ZrbWB85a7FkCMMXErQr7Fd88e2CtvgFZMN3QO8x3aKtd1Pw5sTdbgBwObJW2ul
-uIncrKTdcu1OofdPvAbT6shkdHvClUGcZXNY8ZCaPGqxmMnEh7zPRW1F4m4iP/68
-DzFc6PLZ
------END CERTIFICATE-----
-
-Verisign Class 2 Public Primary Certification Authority - G2
-============================================================
-
------BEGIN CERTIFICATE-----
-MIIDAzCCAmwCEQC5L2DMiJ+hekYJuFtwbIqvMA0GCSqGSIb3DQEBBQUAMIHBMQsw
-CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xPDA6BgNVBAsTM0Ns
-YXNzIDIgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH
-MjE6MDgGA1UECxMxKGMpIDE5OTggVmVyaVNpZ24sIEluYy4gLSBGb3IgYXV0aG9y
-aXplZCB1c2Ugb25seTEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1c3QgTmV0d29yazAe
-Fw05ODA1MTgwMDAwMDBaFw0yODA4MDEyMzU5NTlaMIHBMQswCQYDVQQGEwJVUzEX
-MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xPDA6BgNVBAsTM0NsYXNzIDIgUHVibGlj
-IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjE6MDgGA1UECxMx
-KGMpIDE5OTggVmVyaVNpZ24sIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s
-eTEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1c3QgTmV0d29yazCBnzANBgkqhkiG9w0B
-AQEFAAOBjQAwgYkCgYEAp4gBIXQs5xoD8JjhlzwPIQjxnNuX6Zr8wgQGE75fUsjM
-HiwSViy4AWkszJkfrbCWrnkE8hM5wXuYuggs6MKEEyyqaekJ9MepAqRCwiNPStjw
-DqL7MWzJ5m+ZJwf15vRMeJ5t60aG+rmGyVTyssSv1EYcWskVMP8NbPUtDm3Of3cC
-AwEAATANBgkqhkiG9w0BAQUFAAOBgQByLvl/0fFx+8Se9sVeUYpAmLho+Jscg9ji
-nb3/7aHmZuovCfTK1+qlK5X2JGCGTUQug6XELaDTrnhpb3LabK4I8GOSN+a7xDAX
-rXfMSTWqz9iP0b63GJZHc2pUIjRkLbYWm1lbtFFZOrMLFPQS32eg9K0yZF6xRnIn
-jBJ7xUS0rg==
------END CERTIFICATE-----
-
-Verisign Class 3 Public Primary Certification Authority - G2
-============================================================
-
------BEGIN CERTIFICATE-----
-MIIDAjCCAmsCEH3Z/gfPqB63EHln+6eJNMYwDQYJKoZIhvcNAQEFBQAwgcExCzAJ
-BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xh
-c3MgMyBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcy
-MTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3Jp
-emVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMB4X
-DTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVTMRcw
-FQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMg
-UHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEo
-YykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5
-MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEB
-AQUAA4GNADCBiQKBgQDMXtERXVxp0KvTuWpMmR9ZmDCOFoUgRm1HP9SFIIThbbP4
-pO0M8RcPO/mn+SXXwc+EY/J8Y8+iR/LGWzOOZEAEaMGAuWQcRXfH2G71lSk8UOg0
-13gfqLptQ5GVj0VXXn7F+8qkBOvqlzdUMG+7AUcyM83cV5tkaWH4mx0ciU9cZwID
-AQABMA0GCSqGSIb3DQEBBQUAA4GBAFFNzb5cy5gZnBWyATl4Lk0PZ3BwmcYQWpSk
-U01UbSuvDV1Ai2TT1+7eVmGSX6bEHRBhNtMsJzzoKQm5EWR0zLVznxxIqbxhAe7i
-F6YM40AIOw7n60RzKprxaZLvcRTDOaxxp5EJb+RxBrO6WVcmeQD2+A2iMzAo1KpY
-oJ2daZH9
------END CERTIFICATE-----
-
-Verisign Class 4 Public Primary Certification Authority - G2
-============================================================
-
------BEGIN CERTIFICATE-----
-MIIDAjCCAmsCEDKIjprS9esTR/h/xCA3JfgwDQYJKoZIhvcNAQEFBQAwgcExCzAJ
-BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xh
-c3MgNCBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcy
-MTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3Jp
-emVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMB4X
-DTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVTMRcw
-FQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgNCBQdWJsaWMg
-UHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEo
-YykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5
-MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEB
-AQUAA4GNADCBiQKBgQC68OTP+cSuhVS5B1f5j8V/aBH4xBewRNzjMHPVKmIquNDM
-HO0oW369atyzkSTKQWI8/AIBvxwWMZQFl3Zuoq29YRdsTjCG8FE3KlDHqGKB3FtK
-qsGgtG7rL+VXxbErQHDbWk2hjh+9Ax/YA9SPTJlxvOKCzFjomDqG04Y48wApHwID
-AQABMA0GCSqGSIb3DQEBBQUAA4GBAIWMEsGnuVAVess+rLhDityq3RS6iYF+ATwj
-cSGIL4LcY/oCRaxFWdcqWERbt5+BO5JoPeI3JPV7bI92NZYJqFmduc4jq3TWg/0y
-cyfYaT5DdPauxYma51N86Xv2S/PBZYPejYqcPIiNOVn8qj8ijaHBZlCBckztImRP
-T8qAkbYp
------END CERTIFICATE-----
-
-Verisign Class 1 Public Primary Certification Authority - G3
-============================================================
-
------BEGIN CERTIFICATE-----
-MIIEGjCCAwICEQCLW3VWhFSFCwDPrzhIzrGkMA0GCSqGSIb3DQEBBQUAMIHKMQsw
-CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl
-cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu
-LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT
-aWduIENsYXNzIDEgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
-dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD
-VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT
-aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ
-bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu
-IENsYXNzIDEgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
-LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN2E1Lm0+afY8wR4
-nN493GwTFtl63SRRZsDHJlkNrAYIwpTRMx/wgzUfbhvI3qpuFU5UJ+/EbRrsC+MO
-8ESlV8dAWB6jRx9x7GD2bZTIGDnt/kIYVt/kTEkQeE4BdjVjEjbdZrwBBDajVWjV
-ojYJrKshJlQGrT/KFOCsyq0GHZXi+J3x4GD/wn91K0zM2v6HmSHquv4+VNfSWXjb
-PG7PoBMAGrgnoeS+Z5bKoMWznN3JdZ7rMJpfo83ZrngZPyPpXNspva1VyBtUjGP2
-6KbqxzcSXKMpHgLZ2x87tNcPVkeBFQRKr4Mn0cVYiMHd9qqnoxjaaKptEVHhv2Vr
-n5Z20T0CAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAq2aN17O6x5q25lXQBfGfMY1a
-qtmqRiYPce2lrVNWYgFHKkTp/j90CxObufRNG7LRX7K20ohcs5/Ny9Sn2WCVhDr4
-wTcdYcrnsMXlkdpUpqwxga6X3s0IrLjAl4B/bnKk52kTlWUfxJM8/XmPBNQ+T+r3
-ns7NZ3xPZQL/kYVUc8f/NveGLezQXk//EZ9yBta4GvFMDSZl4kSAHsef493oCtrs
-pSCAaWihT37ha88HQfqDjrw43bAuEbFrskLMmrz5SCJ5ShkPshw+IHTZasO+8ih4
-E1Z5T21Q6huwtVexN2ZYI/PcD98Kh8TvhgXVOBRgmaNL3gaWcSzy27YfpO8/7g==
------END CERTIFICATE-----
-
-Verisign Class 2 Public Primary Certification Authority - G3
-============================================================
-
------BEGIN CERTIFICATE-----
-MIIEGTCCAwECEGFwy0mMX5hFKeewptlQW3owDQYJKoZIhvcNAQEFBQAwgcoxCzAJ
-BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVy
-aVNpZ24gVHJ1c3QgTmV0d29yazE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24s
-IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTFFMEMGA1UEAxM8VmVyaVNp
-Z24gQ2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
-eSAtIEczMB4XDTk5MTAwMTAwMDAwMFoXDTM2MDcxNjIzNTk1OVowgcoxCzAJBgNV
-BAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVyaVNp
-Z24gVHJ1c3QgTmV0d29yazE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24sIElu
-Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTFFMEMGA1UEAxM8VmVyaVNpZ24g
-Q2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAt
-IEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArwoNwtUs22e5LeWU
-J92lvuCwTY+zYVY81nzD9M0+hsuiiOLh2KRpxbXiv8GmR1BeRjmL1Za6tW8UvxDO
-JxOeBUebMXoT2B/Z0wI3i60sR/COgQanDTAM6/c8DyAd3HJG7qUCyFvDyVZpTMUY
-wZF7C9UTAJu878NIPkZgIIUq1ZC2zYugzDLdt/1AVbJQHFauzI13TccgTacxdu9o
-koqQHgiBVrKtaaNS0MscxCM9H5n+TOgWY47GCI72MfbS+uV23bUckqNJzc0BzWjN
-qWm6o+sdDZykIKbBoMXRRkwXbdKsZj+WjOCE1Db/IlnF+RFgqF8EffIa9iVCYQ/E
-Srg+iQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQA0JhU8wI1NQ0kdvekhktdmnLfe
-xbjQ5F1fdiLAJvmEOjr5jLX77GDx6M4EsMjdpwOPMPOY36TmpDHf0xwLRtxyID+u
-7gU8pDM/CzmscHhzS5kr3zDCVLCoO1Wh/hYozUK9dG6A2ydEp85EXdQbkJgNHkKU
-sQAsBNB0owIFImNjzYO1+8FtYmtpdf1dcEG59b98377BMnMiIYtYgXsVkXq642RI
-sH/7NiXaldDxJBQX3RiAa0YjOVT1jmIJBB2UkKab5iXiQkWquJCtvgiPqQtCGJTP
-cjnhsUPgKM+351psE2tJs//jGHyJizNdrDPXp/naOlXJWBD5qu9ats9LS98q
------END CERTIFICATE-----
-
-Verisign Class 3 Public Primary Certification Authority - G3
-============================================================
-
------BEGIN CERTIFICATE-----
-MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw
-CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl
-cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu
-LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT
-aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
-dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD
-VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT
-aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ
-bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu
-IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
-LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b
-N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t
-KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu
-kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm
-CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ
-Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu
-imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te
-2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe
-DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC
-/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p
-F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt
-TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ==
------END CERTIFICATE-----
-
-Verisign Class 4 Public Primary Certification Authority - G3
-============================================================
-
------BEGIN CERTIFICATE-----
-MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQsw
-CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl
-cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu
-LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT
-aWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
-dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD
-VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT
-aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ
-bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu
-IENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
-LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK3LpRFpxlmr8Y+1
-GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaStBO3IFsJ
-+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0Gbd
-U6LM8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLm
-NxdLMEYH5IBtptiWLugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XY
-ufTsgsbSPZUd5cBPhMnZo0QoBmrXRazwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/
-ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAj/ola09b5KROJ1WrIhVZPMq1
-CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXttmhwwjIDLk5Mq
-g6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm
-fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c
-2NU8Qh0XwRJdRTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/
-bLvSHgCwIe34QWKCudiyxLtGUPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg==
------END CERTIFICATE-----
-
-Equifax Secure Global eBusiness CA
-==================================
-
------BEGIN CERTIFICATE-----
-MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJVUzEc
-MBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1aWZheCBT
-ZWN1cmUgR2xvYmFsIGVCdXNpbmVzcyBDQS0xMB4XDTk5MDYyMTA0MDAwMFoXDTIw
-MDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0VxdWlmYXggU2Vj
-dXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJlIEdsb2JhbCBlQnVzaW5l
-c3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAuucXkAJlsTRVPEnC
-UdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQytd4zjTov2/KaelpzmKNc6fuKcxtc
-58O/gGzNqfTWK8D3+ZmqY6KxRwIP1ORROhI8bIpaVIRw28HFkM9yRcuoWcDNM50/
-o5brhTMhHD4ePmBudpxnhcXIw2ECAwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAH
-MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUvqigdHJQa0S3ySPY+6j/s1dr
-aGwwHQYDVR0OBBYEFL6ooHRyUGtEt8kj2Puo/7NXa2hsMA0GCSqGSIb3DQEBBAUA
-A4GBADDiAVGqx+pf2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okENI7SS+RkA
-Z70Br83gcfxaz2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv
-8qIYNMR1pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV
------END CERTIFICATE-----
-
-Equifax Secure eBusiness CA 1
-=============================
-
------BEGIN CERTIFICATE-----
-MIICgjCCAeugAwIBAgIBBDANBgkqhkiG9w0BAQQFADBTMQswCQYDVQQGEwJVUzEc
-MBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBT
-ZWN1cmUgZUJ1c2luZXNzIENBLTEwHhcNOTkwNjIxMDQwMDAwWhcNMjAwNjIxMDQw
-MDAwWjBTMQswCQYDVQQGEwJVUzEcMBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5j
-LjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNzIENBLTEwgZ8wDQYJ
-KoZIhvcNAQEBBQADgY0AMIGJAoGBAM4vGbwXt3fek6lfWg0XTzQaDJj0ItlZ1MRo
-RvC0NcWFAyDGr0WlIVFFQesWWDYyb+JQYmT5/VGcqiTZ9J2DKocKIdMSODRsjQBu
-WqDZQu4aIZX5UkxVWsUPOE9G+m34LjXWHXzr4vCwdYDIqROsvojvOm6rXyo4YgKw
-Env+j6YDAgMBAAGjZjBkMBEGCWCGSAGG+EIBAQQEAwIABzAPBgNVHRMBAf8EBTAD
-AQH/MB8GA1UdIwQYMBaAFEp4MlIR21kWNl7fwRQ2QGpHfEyhMB0GA1UdDgQWBBRK
-eDJSEdtZFjZe38EUNkBqR3xMoTANBgkqhkiG9w0BAQQFAAOBgQB1W6ibAxHm6VZM
-zfmpTMANmvPMZWnmJXbMWbfWVMMdzZmsGd20hdXgPfxiIKeES1hl8eL5lSE/9dR+
-WB5Hh1Q+WKG1tfgq73HnvMP2sUlG4tega+VWeponmHxGYhTnyfxuAxJ5gDgdSIKN
-/Bf+KpYrtWKmpj29f5JZzVoqgrI3eQ==
------END CERTIFICATE-----
-
-Equifax Secure eBusiness CA 2
-=============================
-
------BEGIN CERTIFICATE-----
-MIIDIDCCAomgAwIBAgIEN3DPtTANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV
-UzEXMBUGA1UEChMORXF1aWZheCBTZWN1cmUxJjAkBgNVBAsTHUVxdWlmYXggU2Vj
-dXJlIGVCdXNpbmVzcyBDQS0yMB4XDTk5MDYyMzEyMTQ0NVoXDTE5MDYyMzEyMTQ0
-NVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDkVxdWlmYXggU2VjdXJlMSYwJAYD
-VQQLEx1FcXVpZmF4IFNlY3VyZSBlQnVzaW5lc3MgQ0EtMjCBnzANBgkqhkiG9w0B
-AQEFAAOBjQAwgYkCgYEA5Dk5kx5SBhsoNviyoynF7Y6yEb3+6+e0dMKP/wXn2Z0G
-vxLIPw7y1tEkshHe0XMJitSxLJgJDR5QRrKDpkWNYmi7hRsgcDKqQM2mll/EcTc/
-BPO3QSQ5BxoeLmFYoBIL5aXfxavqN3HMHMg3OrmXUqesxWoklE6ce8/AatbfIb0C
-AwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEX
-MBUGA1UEChMORXF1aWZheCBTZWN1cmUxJjAkBgNVBAsTHUVxdWlmYXggU2VjdXJl
-IGVCdXNpbmVzcyBDQS0yMQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTkw
-NjIzMTIxNDQ1WjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUUJ4L6q9euSBIplBq
-y/3YIHqngnYwHQYDVR0OBBYEFFCeC+qvXrkgSKZQasv92CB6p4J2MAwGA1UdEwQF
-MAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA
-A4GBAAyGgq3oThr1jokn4jVYPSm0B482UJW/bsGe68SQsoWou7dC4A8HOd/7npCy
-0cE+U58DRLB+S/Rv5Hwf5+Kx5Lia78O9zt4LMjTZ3ijtM2vE1Nc9ElirfQkty3D1
-E4qUoSek1nDFbZS1yX2doNLGCEnZZpum0/QL3MUmV+GRMOrN
------END CERTIFICATE-----
-
-Thawte Time Stamping CA
-=======================
-
------BEGIN CERTIFICATE-----
-MIICoTCCAgqgAwIBAgIBADANBgkqhkiG9w0BAQQFADCBizELMAkGA1UEBhMCWkEx
-FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTEUMBIGA1UEBxMLRHVyYmFudmlsbGUxDzAN
-BgNVBAoTBlRoYXd0ZTEdMBsGA1UECxMUVGhhd3RlIENlcnRpZmljYXRpb24xHzAd
-BgNVBAMTFlRoYXd0ZSBUaW1lc3RhbXBpbmcgQ0EwHhcNOTcwMTAxMDAwMDAwWhcN
-MjAxMjMxMjM1OTU5WjCBizELMAkGA1UEBhMCWkExFTATBgNVBAgTDFdlc3Rlcm4g
-Q2FwZTEUMBIGA1UEBxMLRHVyYmFudmlsbGUxDzANBgNVBAoTBlRoYXd0ZTEdMBsG
-A1UECxMUVGhhd3RlIENlcnRpZmljYXRpb24xHzAdBgNVBAMTFlRoYXd0ZSBUaW1l
-c3RhbXBpbmcgQ0EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANYrWHhhRYZT
-6jR7UZztsOYuGA7+4F+oJ9O0yeB8WU4WDnNUYMF/9p8u6TqFJBU820cEY8OexJQa
-Wt9MevPZQx08EHp5JduQ/vBR5zDWQQD9nyjfeb6Uu522FOMjhdepQeBMpHmwKxqL
-8vg7ij5FrHGSALSQQZj7X+36ty6K+Ig3AgMBAAGjEzARMA8GA1UdEwEB/wQFMAMB
-Af8wDQYJKoZIhvcNAQEEBQADgYEAZ9viwuaHPUCDhjc1fR/OmsMMZiCouqoEiYbC
-9RAIDb/LogWK0E02PvTX72nGXuSwlG9KuefeW4i2e9vjJ+V2w/A1wcu1J5szedyQ
-pgCed/r8zSeUQhac0xxo7L9c3eWpexAKMnRUEzGLhQOEkbdYATAUOK8oyvyxUBkZ
-CayJSdM=
------END CERTIFICATE-----
-
-thawte Primary Root CA
-======================
-
------BEGIN CERTIFICATE-----
-MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB
-qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
-Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
-MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV
-BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw
-NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j
-LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG
-A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
-IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG
-SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs
-W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta
-3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk
-6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6
-Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J
-NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA
-MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP
-r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU
-DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz
-YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX
-xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2
-/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/
-LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7
-jVaMaA==
------END CERTIFICATE-----
-
-VeriSign Class 3 Public Primary Certification Authority - G5
-============================================================
-
------BEGIN CERTIFICATE-----
-MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB
-yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
-ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp
-U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW
-ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0
-aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL
-MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
-ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln
-biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
-U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
-aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1
-nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex
-t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz
-SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG
-BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+
-rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/
-NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E
-BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH
-BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy
-aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv
-MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE
-p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y
-5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK
-WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ
-4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N
-hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq
------END CERTIFICATE-----
-
-Entrust.net Secure Server Certification Authority
-=================================================
-
------BEGIN CERTIFICATE-----
-MIIE2DCCBEGgAwIBAgIEN0rSQzANBgkqhkiG9w0BAQUFADCBwzELMAkGA1UEBhMC
-VVMxFDASBgNVBAoTC0VudHJ1c3QubmV0MTswOQYDVQQLEzJ3d3cuZW50cnVzdC5u
-ZXQvQ1BTIGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTElMCMGA1UECxMc
-KGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDE6MDgGA1UEAxMxRW50cnVzdC5u
-ZXQgU2VjdXJlIFNlcnZlciBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw05OTA1
-MjUxNjA5NDBaFw0xOTA1MjUxNjM5NDBaMIHDMQswCQYDVQQGEwJVUzEUMBIGA1UE
-ChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5j
-b3JwLiBieSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBF
-bnRydXN0Lm5ldCBMaW1pdGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUg
-U2VydmVyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGdMA0GCSqGSIb3DQEBAQUA
-A4GLADCBhwKBgQDNKIM0VBuJ8w+vN5Ex/68xYMmo6LIQaO2f55M28Qpku0f1BBc/
-I0dNxScZgSYMVHINiC3ZH5oSn7yzcdOAGT9HZnuMNSjSuQrfJNqc1lB5gXpa0zf3
-wkrYKZImZNHkmGw6AIr1NJtl+O3jEP/9uElY3KDegjlrgbEWGWG5VLbmQwIBA6OC
-AdcwggHTMBEGCWCGSAGG+EIBAQQEAwIABzCCARkGA1UdHwSCARAwggEMMIHeoIHb
-oIHYpIHVMIHSMQswCQYDVQQGEwJVUzEUMBIGA1UEChMLRW50cnVzdC5uZXQxOzA5
-BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5jb3JwLiBieSByZWYuIChsaW1p
-dHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBFbnRydXN0Lm5ldCBMaW1pdGVk
-MTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENlcnRpZmljYXRp
-b24gQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMCmgJ6AlhiNodHRwOi8vd3d3LmVu
-dHJ1c3QubmV0L0NSTC9uZXQxLmNybDArBgNVHRAEJDAigA8xOTk5MDUyNTE2MDk0
-MFqBDzIwMTkwNTI1MTYwOTQwWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAU8Bdi
-E1U9s/8KAGv7UISX8+1i0BowHQYDVR0OBBYEFPAXYhNVPbP/CgBr+1CEl/PtYtAa
-MAwGA1UdEwQFMAMBAf8wGQYJKoZIhvZ9B0EABAwwChsEVjQuMAMCBJAwDQYJKoZI
-hvcNAQEFBQADgYEAkNwwAvpkdMKnCqV8IY00F6j7Rw7/JXyNEwr75Ji174z4xRAN
-95K+8cPV1ZVqBLssziY2ZcgxxufuP+NXdYR6Ee9GTxj005i7qIcyunL2POI9n9cd
-2cNgQ4xYDiKWL2KjLB+6rQXvqzJ4h6BUcxm1XAX5Uj5tLUUL9wqT6u0G+bI=
------END CERTIFICATE-----
-
-Go Daddy Certification Authority Root Certificate Bundle
-========================================================
-
------BEGIN CERTIFICATE-----
-MIIE3jCCA8agAwIBAgICAwEwDQYJKoZIhvcNAQEFBQAwYzELMAkGA1UEBhMCVVMx
-ITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g
-RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMTYw
-MTU0MzdaFw0yNjExMTYwMTU0MzdaMIHKMQswCQYDVQQGEwJVUzEQMA4GA1UECBMH
-QXJpem9uYTETMBEGA1UEBxMKU2NvdHRzZGFsZTEaMBgGA1UEChMRR29EYWRkeS5j
-b20sIEluYy4xMzAxBgNVBAsTKmh0dHA6Ly9jZXJ0aWZpY2F0ZXMuZ29kYWRkeS5j
-b20vcmVwb3NpdG9yeTEwMC4GA1UEAxMnR28gRGFkZHkgU2VjdXJlIENlcnRpZmlj
-YXRpb24gQXV0aG9yaXR5MREwDwYDVQQFEwgwNzk2OTI4NzCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAMQt1RWMnCZM7DI161+4WQFapmGBWTtwY6vj3D3H
-KrjJM9N55DrtPDAjhI6zMBS2sofDPZVUBJ7fmd0LJR4h3mUpfjWoqVTr9vcyOdQm
-VZWt7/v+WIbXnvQAjYwqDL1CBM6nPwT27oDyqu9SoWlm2r4arV3aLGbqGmu75RpR
-SgAvSMeYddi5Kcju+GZtCpyz8/x4fKL4o/K1w/O5epHBp+YlLpyo7RJlbmr2EkRT
-cDCVw5wrWCs9CHRK8r5RsL+H0EwnWGu1NcWdrxcx+AuP7q2BNgWJCJjPOq8lh8BJ
-6qf9Z/dFjpfMFDniNoW1fho3/Rb2cRGadDAW/hOUoz+EDU8CAwEAAaOCATIwggEu
-MB0GA1UdDgQWBBT9rGEyk2xF1uLuhV+auud2mWjM5zAfBgNVHSMEGDAWgBTSxLDS
-kdRMEXGzYcs9of7dqGrU4zASBgNVHRMBAf8ECDAGAQH/AgEAMDMGCCsGAQUFBwEB
-BCcwJTAjBggrBgEFBQcwAYYXaHR0cDovL29jc3AuZ29kYWRkeS5jb20wRgYDVR0f
-BD8wPTA7oDmgN4Y1aHR0cDovL2NlcnRpZmljYXRlcy5nb2RhZGR5LmNvbS9yZXBv
-c2l0b3J5L2dkcm9vdC5jcmwwSwYDVR0gBEQwQjBABgRVHSAAMDgwNgYIKwYBBQUH
-AgEWKmh0dHA6Ly9jZXJ0aWZpY2F0ZXMuZ29kYWRkeS5jb20vcmVwb3NpdG9yeTAO
-BgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBANKGwOy9+aG2Z+5mC6IG
-OgRQjhVyrEp0lVPLN8tESe8HkGsz2ZbwlFalEzAFPIUyIXvJxwqoJKSQ3kbTJSMU
-A2fCENZvD117esyfxVgqwcSeIaha86ykRvOe5GPLL5CkKSkB2XIsKd83ASe8T+5o
-0yGPwLPk9Qnt0hCqU7S+8MxZC9Y7lhyVJEnfzuz9p0iRFEUOOjZv2kWzRaJBydTX
-RE4+uXR21aITVSzGh6O1mawGhId/dQb8vxRMDsxuxN89txJx9OjxUUAiKEngHUuH
-qDTMBqLdElrRhjZkAzVvb3du6/KFUJheqwNTrZEjYx8WnM25sgVjOuH0aBsXBTWV
-U+4=
------END CERTIFICATE-----
------BEGIN CERTIFICATE-----
-MIIE+zCCBGSgAwIBAgICAQ0wDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1Zh
-bGlDZXJ0IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIElu
-Yy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIgUG9saWN5IFZhbGlkYXRpb24g
-QXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAe
-BgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTA0MDYyOTE3MDYyMFoX
-DTI0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBE
-YWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3MgMiBDZXJ0
-aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggENADCCAQgC
-ggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv
-2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+q
-N1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiO
-r18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lN
-f4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+YihfukEH
-U1jPEX44dMX4/7VpkI+EdOqXG68CAQOjggHhMIIB3TAdBgNVHQ4EFgQU0sSw0pHU
-TBFxs2HLPaH+3ahq1OMwgdIGA1UdIwSByjCBx6GBwaSBvjCBuzEkMCIGA1UEBxMb
-VmFsaUNlcnQgVmFsaWRhdGlvbiBOZXR3b3JrMRcwFQYDVQQKEw5WYWxpQ2VydCwg
-SW5jLjE1MDMGA1UECxMsVmFsaUNlcnQgQ2xhc3MgMiBQb2xpY3kgVmFsaWRhdGlv
-biBBdXRob3JpdHkxITAfBgNVBAMTGGh0dHA6Ly93d3cudmFsaWNlcnQuY29tLzEg
-MB4GCSqGSIb3DQEJARYRaW5mb0B2YWxpY2VydC5jb22CAQEwDwYDVR0TAQH/BAUw
-AwEB/zAzBggrBgEFBQcBAQQnMCUwIwYIKwYBBQUHMAGGF2h0dHA6Ly9vY3NwLmdv
-ZGFkZHkuY29tMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9jZXJ0aWZpY2F0ZXMu
-Z29kYWRkeS5jb20vcmVwb3NpdG9yeS9yb290LmNybDBLBgNVHSAERDBCMEAGBFUd
-IAAwODA2BggrBgEFBQcCARYqaHR0cDovL2NlcnRpZmljYXRlcy5nb2RhZGR5LmNv
-bS9yZXBvc2l0b3J5MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOBgQC1
-QPmnHfbq/qQaQlpE9xXUhUaJwL6e4+PrxeNYiY+Sn1eocSxI0YGyeR+sBjUZsE4O
-WBsUs5iB0QQeyAfJg594RAoYC5jcdnplDQ1tgMQLARzLrUc+cb53S8wGd9D0Vmsf
-SxOaFIqII6hR8INMqzW/Rn453HWkrugp++85j09VZw==
------END CERTIFICATE-----
------BEGIN CERTIFICATE-----
-MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
-IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
-BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
-aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
-9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMTk1NFoXDTE5MDYy
-NjAwMTk1NFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
-azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
-YXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
-Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
-cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDOOnHK5avIWZJV16vY
-dA757tn2VUdZZUcOBVXc65g2PFxTXdMwzzjsvUGJ7SVCCSRrCl6zfN1SLUzm1NZ9
-WlmpZdRJEy0kTRxQb7XBhVQ7/nHk01xC+YDgkRoKWzk2Z/M/VXwbP7RfZHM047QS
-v4dk+NoS/zcnwbNDu+97bi5p9wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBADt/UG9v
-UJSZSWI4OB9L+KXIPqeCgfYrx+jFzug6EILLGACOTb2oWH+heQC1u+mNr0HZDzTu
-IYEZoDJJKPTEjlbVUjP9UNV+mWwD5MlM/Mtsq2azSiGM5bUMMj4QssxsodyamEwC
-W/POuZ6lcg5Ktz885hZo+L7tdEy8W9ViH0Pd
------END CERTIFICATE-----
-
-GeoTrust Global CA
-==================
-
------BEGIN CERTIFICATE-----
-MIIDfTCCAuagAwIBAgIDErvmMA0GCSqGSIb3DQEBBQUAME4xCzAJBgNVBAYTAlVT
-MRAwDgYDVQQKEwdFcXVpZmF4MS0wKwYDVQQLEyRFcXVpZmF4IFNlY3VyZSBDZXJ0
-aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDIwNTIxMDQwMDAwWhcNMTgwODIxMDQwMDAw
-WjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UE
-AxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
-CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9m
-OSm9BXiLnTjoBbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIu
-T8rxh0PBFpVXLVDviS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6c
-JmTM386DGXHKTubU1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmR
-Cw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5asz
-PeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo4HwMIHtMB8GA1UdIwQYMBaAFEjm
-aPkr0rKV10fYIyAQTzOYkJ/UMB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrM
-TjAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjA6BgNVHR8EMzAxMC+g
-LaArhilodHRwOi8vY3JsLmdlb3RydXN0LmNvbS9jcmxzL3NlY3VyZWNhLmNybDBO
-BgNVHSAERzBFMEMGBFUdIAAwOzA5BggrBgEFBQcCARYtaHR0cHM6Ly93d3cuZ2Vv
-dHJ1c3QuY29tL3Jlc291cmNlcy9yZXBvc2l0b3J5MA0GCSqGSIb3DQEBBQUAA4GB
-AHbhEm5OSxYShjAGsoEIz/AIx8dxfmbuwu3UOx//8PDITtZDOLC5MH0Y0FWDomrL
-NhGc6Ehmo21/uBPUR/6LWlxz/K7ZGzIZOKuXNBSqltLroxwUCEm2u+WR74M26x1W
-b8ravHNjkOR/ez4iyz0H7V84dJzjA1BOoa+Y7mHyhD8S
------END CERTIFICATE-----
-
diff --git a/python-packages/httplib2/iri2uri.py b/python-packages/httplib2/iri2uri.py
deleted file mode 100755
index d88c91fdfb..0000000000
--- a/python-packages/httplib2/iri2uri.py
+++ /dev/null
@@ -1,110 +0,0 @@
-"""
-iri2uri
-
-Converts an IRI to a URI.
-
-"""
-__author__ = "Joe Gregorio (joe@bitworking.org)"
-__copyright__ = "Copyright 2006, Joe Gregorio"
-__contributors__ = []
-__version__ = "1.0.0"
-__license__ = "MIT"
-__history__ = """
-"""
-
-import urlparse
-
-
-# Convert an IRI to a URI following the rules in RFC 3987
-#
-# The characters we need to encode and escape are defined in the spec:
-#
-# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
-# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
-# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
-# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
-# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
-# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
-# / %xD0000-DFFFD / %xE1000-EFFFD
-
-escape_range = [
- (0xA0, 0xD7FF),
- (0xE000, 0xF8FF),
- (0xF900, 0xFDCF),
- (0xFDF0, 0xFFEF),
- (0x10000, 0x1FFFD),
- (0x20000, 0x2FFFD),
- (0x30000, 0x3FFFD),
- (0x40000, 0x4FFFD),
- (0x50000, 0x5FFFD),
- (0x60000, 0x6FFFD),
- (0x70000, 0x7FFFD),
- (0x80000, 0x8FFFD),
- (0x90000, 0x9FFFD),
- (0xA0000, 0xAFFFD),
- (0xB0000, 0xBFFFD),
- (0xC0000, 0xCFFFD),
- (0xD0000, 0xDFFFD),
- (0xE1000, 0xEFFFD),
- (0xF0000, 0xFFFFD),
- (0x100000, 0x10FFFD),
-]
-
-def encode(c):
- retval = c
- i = ord(c)
- for low, high in escape_range:
- if i < low:
- break
- if i >= low and i <= high:
- retval = "".join(["%%%2X" % ord(o) for o in c.encode('utf-8')])
- break
- return retval
-
-
-def iri2uri(uri):
- """Convert an IRI to a URI. Note that IRIs must be
-    passed in as unicode strings. That is, do not utf-8 encode
- the IRI before passing it into the function."""
-    if isinstance(uri, unicode):
- (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)
- authority = authority.encode('idna')
- # For each character in 'ucschar' or 'iprivate'
- # 1. encode as utf-8
- # 2. then %-encode each octet of that utf-8
- uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))
- uri = "".join([encode(c) for c in uri])
- return uri
-
-if __name__ == "__main__":
- import unittest
-
- class Test(unittest.TestCase):
-
- def test_uris(self):
- """Test that URIs are invariant under the transformation."""
- invariant = [
- u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
- u"http://www.ietf.org/rfc/rfc2396.txt",
- u"ldap://[2001:db8::7]/c=GB?objectClass?one",
- u"mailto:John.Doe@example.com",
- u"news:comp.infosystems.www.servers.unix",
- u"tel:+1-816-555-1212",
- u"telnet://192.0.2.16:80/",
- u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
- for uri in invariant:
- self.assertEqual(uri, iri2uri(uri))
-
- def test_iri(self):
- """ Test that the right type of escaping is done for each part of the URI."""
- self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
- self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
- self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
- self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
- self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
- self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
- self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))
-
- unittest.main()
-
-
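For reference, the deleted iri2uri module had a one-function API. A minimal
usage sketch, drawn from the module's own tests (Python 2; the IRI must be a
unicode string)::

    from httplib2.iri2uri import iri2uri

    # Non-ASCII characters are %-encoded as UTF-8 octets and the
    # authority is IDNA-encoded.
    print(iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
    # -> http://xn--o3h.com/%E2%98%84
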
diff --git a/python-packages/httplib2/socks.py b/python-packages/httplib2/socks.py
deleted file mode 100755
index 0991f4cf6e..0000000000
--- a/python-packages/httplib2/socks.py
+++ /dev/null
@@ -1,438 +0,0 @@
-"""SocksiPy - Python SOCKS module.
-Version 1.00
-
-Copyright 2006 Dan-Haim. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-1. Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-3. Neither the name of Dan Haim nor the names of his contributors may be used
- to endorse or promote products derived from this software without specific
- prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
-OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-This module provides a standard socket-like interface for Python
-for tunneling connections through SOCKS proxies.
-
-"""
-
-"""
-
-Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
-for use in PyLoris (http://pyloris.sourceforge.net/)
-
-Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
-mainly to merge bug fixes found in Sourceforge
-
-"""
-
-import base64
-import socket
-import struct
-import sys
-
-if getattr(socket, 'socket', None) is None:
- raise ImportError('socket.socket missing, proxy support unusable')
-
-PROXY_TYPE_SOCKS4 = 1
-PROXY_TYPE_SOCKS5 = 2
-PROXY_TYPE_HTTP = 3
-PROXY_TYPE_HTTP_NO_TUNNEL = 4
-
-_defaultproxy = None
-_orgsocket = socket.socket
-
-class ProxyError(Exception): pass
-class GeneralProxyError(ProxyError): pass
-class Socks5AuthError(ProxyError): pass
-class Socks5Error(ProxyError): pass
-class Socks4Error(ProxyError): pass
-class HTTPError(ProxyError): pass
-
-_generalerrors = ("success",
- "invalid data",
- "not connected",
- "not available",
- "bad proxy type",
- "bad input")
-
-_socks5errors = ("succeeded",
- "general SOCKS server failure",
- "connection not allowed by ruleset",
- "Network unreachable",
- "Host unreachable",
- "Connection refused",
- "TTL expired",
- "Command not supported",
- "Address type not supported",
- "Unknown error")
-
-_socks5autherrors = ("succeeded",
- "authentication is required",
- "all offered authentication methods were rejected",
- "unknown username or invalid password",
- "unknown error")
-
-_socks4errors = ("request granted",
- "request rejected or failed",
- "request rejected because SOCKS server cannot connect to identd on the client",
- "request rejected because the client program and identd report different user-ids",
- "unknown error")
-
-def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
- """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
- Sets a default proxy which all further socksocket objects will use,
- unless explicitly changed.
- """
- global _defaultproxy
- _defaultproxy = (proxytype, addr, port, rdns, username, password)
-
-def wrapmodule(module):
- """wrapmodule(module)
- Attempts to replace a module's socket library with a SOCKS socket. Must set
- a default proxy using setdefaultproxy(...) first.
- This will only work on modules that import socket directly into the namespace;
- most of the Python Standard Library falls into this category.
- """
- if _defaultproxy != None:
- module.socket.socket = socksocket
- else:
- raise GeneralProxyError((4, "no proxy specified"))
-
-class socksocket(socket.socket):
- """socksocket([family[, type[, proto]]]) -> socket object
- Open a SOCKS enabled socket. The parameters are the same as
- those of the standard socket init. In order for SOCKS to work,
- you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
- """
-
- def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
- _orgsocket.__init__(self, family, type, proto, _sock)
- if _defaultproxy != None:
- self.__proxy = _defaultproxy
- else:
- self.__proxy = (None, None, None, None, None, None)
- self.__proxysockname = None
- self.__proxypeername = None
- self.__httptunnel = True
-
- def __recvall(self, count):
- """__recvall(count) -> data
- Receive EXACTLY the number of bytes requested from the socket.
- Blocks until the required number of bytes have been received.
- """
- data = self.recv(count)
- while len(data) < count:
- d = self.recv(count-len(data))
- if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
- data = data + d
- return data
-
- def sendall(self, content, *args):
- """ override socket.socket.sendall method to rewrite the header
- for non-tunneling proxies if needed
- """
- if not self.__httptunnel:
- content = self.__rewriteproxy(content)
- return super(socksocket, self).sendall(content, *args)
-
- def __rewriteproxy(self, header):
- """ rewrite HTTP request headers to support non-tunneling proxies
- (i.e. those which do not support the CONNECT method).
- This only works for HTTP (not HTTPS) since HTTPS requires tunneling.
- """
- host, endpt = None, None
- hdrs = header.split("\r\n")
- for hdr in hdrs:
- if hdr.lower().startswith("host:"):
- host = hdr
- elif hdr.lower().startswith("get") or hdr.lower().startswith("post"):
- endpt = hdr
- if host and endpt:
- hdrs.remove(host)
- hdrs.remove(endpt)
- host = host.split(" ")[1]
- endpt = endpt.split(" ")
- if (self.__proxy[4] != None and self.__proxy[5] != None):
- hdrs.insert(0, self.__getauthheader())
- hdrs.insert(0, "Host: %s" % host)
- hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2]))
- return "\r\n".join(hdrs)
-
- def __getauthheader(self):
- auth = self.__proxy[4] + ":" + self.__proxy[5]
- return "Proxy-Authorization: Basic " + base64.b64encode(auth)
-
- def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
- """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
- Sets the proxy to be used.
- proxytype - The type of the proxy to be used. Three types
- are supported: PROXY_TYPE_SOCKS4 (including socks4a),
- PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
- addr - The address of the server (IP or DNS).
- port - The port of the server. Defaults to 1080 for SOCKS
- servers and 8080 for HTTP proxy servers.
-        rdns -        Should DNS queries be performed on the remote side
- (rather than the local side). The default is True.
- Note: This has no effect with SOCKS4 servers.
- username - Username to authenticate with to the server.
- The default is no authentication.
- password - Password to authenticate with to the server.
- Only relevant when username is also provided.
- """
- self.__proxy = (proxytype, addr, port, rdns, username, password)
-
- def __negotiatesocks5(self, destaddr, destport):
- """__negotiatesocks5(self,destaddr,destport)
- Negotiates a connection through a SOCKS5 server.
- """
- # First we'll send the authentication packages we support.
- if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
- # The username/password details were supplied to the
- # setproxy method so we support the USERNAME/PASSWORD
- # authentication (in addition to the standard none).
- self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
- else:
- # No username/password were entered, therefore we
- # only support connections with no authentication.
- self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
- # We'll receive the server's response to determine which
- # method was selected
- chosenauth = self.__recvall(2)
- if chosenauth[0:1] != chr(0x05).encode():
- self.close()
- raise GeneralProxyError((1, _generalerrors[1]))
- # Check the chosen authentication method
- if chosenauth[1:2] == chr(0x00).encode():
- # No authentication is required
- pass
- elif chosenauth[1:2] == chr(0x02).encode():
- # Okay, we need to perform a basic username/password
- # authentication.
- self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
- authstat = self.__recvall(2)
- if authstat[0:1] != chr(0x01).encode():
- # Bad response
- self.close()
- raise GeneralProxyError((1, _generalerrors[1]))
- if authstat[1:2] != chr(0x00).encode():
- # Authentication failed
- self.close()
- raise Socks5AuthError((3, _socks5autherrors[3]))
- # Authentication succeeded
- else:
- # Reaching here is always bad
- self.close()
-            if chosenauth[1:2] == chr(0xFF).encode():
- raise Socks5AuthError((2, _socks5autherrors[2]))
- else:
- raise GeneralProxyError((1, _generalerrors[1]))
- # Now we can request the actual connection
- req = struct.pack('BBB', 0x05, 0x01, 0x00)
- # If the given destination address is an IP address, we'll
- # use the IPv4 address request even if remote resolving was specified.
- try:
- ipaddr = socket.inet_aton(destaddr)
- req = req + chr(0x01).encode() + ipaddr
- except socket.error:
- # Well it's not an IP number, so it's probably a DNS name.
- if self.__proxy[3]:
- # Resolve remotely
- ipaddr = None
- req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr
- else:
- # Resolve locally
- ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
- req = req + chr(0x01).encode() + ipaddr
- req = req + struct.pack(">H", destport)
- self.sendall(req)
- # Get the response
- resp = self.__recvall(4)
- if resp[0:1] != chr(0x05).encode():
- self.close()
- raise GeneralProxyError((1, _generalerrors[1]))
- elif resp[1:2] != chr(0x00).encode():
- # Connection failed
- self.close()
- if ord(resp[1:2])<=8:
- raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
- else:
- raise Socks5Error((9, _socks5errors[9]))
- # Get the bound address/port
- elif resp[3:4] == chr(0x01).encode():
- boundaddr = self.__recvall(4)
- elif resp[3:4] == chr(0x03).encode():
- resp = resp + self.recv(1)
- boundaddr = self.__recvall(ord(resp[4:5]))
- else:
- self.close()
- raise GeneralProxyError((1,_generalerrors[1]))
- boundport = struct.unpack(">H", self.__recvall(2))[0]
- self.__proxysockname = (boundaddr, boundport)
- if ipaddr != None:
- self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
- else:
- self.__proxypeername = (destaddr, destport)
-
- def getproxysockname(self):
- """getsockname() -> address info
- Returns the bound IP address and port number at the proxy.
- """
- return self.__proxysockname
-
- def getproxypeername(self):
- """getproxypeername() -> address info
- Returns the IP and port number of the proxy.
- """
- return _orgsocket.getpeername(self)
-
- def getpeername(self):
- """getpeername() -> address info
- Returns the IP address and port number of the destination
- machine (note: getproxypeername returns the proxy)
- """
- return self.__proxypeername
-
- def __negotiatesocks4(self,destaddr,destport):
- """__negotiatesocks4(self,destaddr,destport)
- Negotiates a connection through a SOCKS4 server.
- """
- # Check if the destination address provided is an IP address
- rmtrslv = False
- try:
- ipaddr = socket.inet_aton(destaddr)
- except socket.error:
- # It's a DNS name. Check where it should be resolved.
- if self.__proxy[3]:
- ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
- rmtrslv = True
- else:
- ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
- # Construct the request packet
- req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
- # The username parameter is considered userid for SOCKS4
- if self.__proxy[4] != None:
- req = req + self.__proxy[4]
- req = req + chr(0x00).encode()
- # DNS name if remote resolving is required
- # NOTE: This is actually an extension to the SOCKS4 protocol
- # called SOCKS4A and may not be supported in all cases.
- if rmtrslv:
- req = req + destaddr + chr(0x00).encode()
- self.sendall(req)
- # Get the response from the server
- resp = self.__recvall(8)
- if resp[0:1] != chr(0x00).encode():
- # Bad data
- self.close()
- raise GeneralProxyError((1,_generalerrors[1]))
- if resp[1:2] != chr(0x5A).encode():
- # Server returned an error
- self.close()
-            if ord(resp[1:2]) in (91, 92, 93):
-                raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
- else:
- raise Socks4Error((94, _socks4errors[4]))
- # Get the bound address/port
- self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
-        # rmtrslv is a bool, so the old "!= None" test was always true;
-        # when the name is resolved remotely, ipaddr is only a placeholder
-        # and the destination hostname is the meaningful peer name.
-        if rmtrslv:
-            self.__proxypeername = (destaddr, destport)
-        else:
-            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
-
- def __negotiatehttp(self, destaddr, destport):
- """__negotiatehttp(self,destaddr,destport)
- Negotiates a connection through an HTTP server.
- """
- # If we need to resolve locally, we do this now
- if not self.__proxy[3]:
- addr = socket.gethostbyname(destaddr)
- else:
- addr = destaddr
- headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"]
- headers += ["Host: ", destaddr, "\r\n"]
- if (self.__proxy[4] != None and self.__proxy[5] != None):
- headers += [self.__getauthheader(), "\r\n"]
- headers.append("\r\n")
- self.sendall("".join(headers).encode())
- # We read the response until we get the string "\r\n\r\n"
-        resp = self.recv(1)
-        while resp.find("\r\n\r\n".encode()) == -1:
-            d = self.recv(1)
-            if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
-            resp = resp + d
- # We just need the first line to check if the connection
- # was successful
- statusline = resp.splitlines()[0].split(" ".encode(), 2)
- if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
- self.close()
- raise GeneralProxyError((1, _generalerrors[1]))
- try:
- statuscode = int(statusline[1])
- except ValueError:
- self.close()
- raise GeneralProxyError((1, _generalerrors[1]))
- if statuscode != 200:
- self.close()
- raise HTTPError((statuscode, statusline[2]))
- self.__proxysockname = ("0.0.0.0", 0)
- self.__proxypeername = (addr, destport)
-
- def connect(self, destpair):
- """connect(self, despair)
- Connects to the specified destination through a proxy.
- destpar - A tuple of the IP/DNS address and the port number.
- (identical to socket's connect).
- To select the proxy server use setproxy().
- """
- # Do a minimal input check first
- if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (not isinstance(destpair[0], basestring)) or (type(destpair[1]) != int):
- raise GeneralProxyError((5, _generalerrors[5]))
- if self.__proxy[0] == PROXY_TYPE_SOCKS5:
- if self.__proxy[2] != None:
- portnum = self.__proxy[2]
- else:
- portnum = 1080
- _orgsocket.connect(self, (self.__proxy[1], portnum))
- self.__negotiatesocks5(destpair[0], destpair[1])
- elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
- if self.__proxy[2] != None:
- portnum = self.__proxy[2]
- else:
- portnum = 1080
- _orgsocket.connect(self,(self.__proxy[1], portnum))
- self.__negotiatesocks4(destpair[0], destpair[1])
- elif self.__proxy[0] == PROXY_TYPE_HTTP:
- if self.__proxy[2] != None:
- portnum = self.__proxy[2]
- else:
- portnum = 8080
- _orgsocket.connect(self,(self.__proxy[1], portnum))
- self.__negotiatehttp(destpair[0], destpair[1])
- elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL:
- if self.__proxy[2] != None:
- portnum = self.__proxy[2]
- else:
- portnum = 8080
- _orgsocket.connect(self,(self.__proxy[1],portnum))
- if destpair[1] == 443:
- self.__negotiatehttp(destpair[0],destpair[1])
- else:
- self.__httptunnel = False
- elif self.__proxy[0] == None:
- _orgsocket.connect(self, (destpair[0], destpair[1]))
- else:
- raise GeneralProxyError((4, _generalerrors[4]))
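For reference, the deleted socks module was driven either per-socket or by
monkey-patching; a minimal sketch (Python 2, assuming a SOCKS5 proxy listening
on 127.0.0.1:1080) looks like::

    import urllib2
    import socks

    # Per-socket: route one connection through the proxy.
    s = socks.socksocket()
    s.setproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 1080)
    s.connect(("www.example.com", 80))

    # Module-wide: patch any module that imports socket directly.
    socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 1080)
    socks.wrapmodule(urllib2)
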
diff --git a/python-packages/httreplay/__init__.py b/python-packages/httreplay/__init__.py
deleted file mode 100644
index 90079fbf23..0000000000
--- a/python-packages/httreplay/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from .patch import start_replay, stop_replay
-from .context import replay
-from .utils import sort_string, sort_string_key
-from .utils import filter_query_params, filter_query_params_key
-from .utils import filter_headers, filter_headers_key
-
-__title__ = 'httreplay'
-__version__ = '0.1.6'
-__build__ = 0x000106
-__author__ = 'Aron Griffis, Dave Peck'
-__license__ = 'MIT'
-__copyright__ = 'Copyright 2013 Aron Griffis and Dave Peck'
diff --git a/python-packages/httreplay/context.py b/python-packages/httreplay/context.py
deleted file mode 100644
index a06be666f4..0000000000
--- a/python-packages/httreplay/context.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from contextlib import contextmanager
-from .patch import start_replay, stop_replay
-
-
-@contextmanager
-def replay(recording_file_name, url_key=None, body_key=None, headers_key=None):
- """
- A simple context manager for using the ``httreplay`` library.
-
- On entry, patches the various supported HTTP-requesting libraries
- (httplib, requests, urllib3) and starts reading from/writing
- to the replay file on disk.
-
- On exit, undoes all patches and ends replay.
-
- Example:
-
- with replay('/tmp/my_recording.json'):
- ... perform http requests ...
-
- Because HTTP requests and responses may contain sensitive data,
- and because they may vary in inconsequential ways that you may
-    wish to ignore, ``httreplay`` provides several hooks to "filter"
- the request contents to generate a stable key suitable for your
- needs. Some example "filters" may be found in the ``utils.py`` file,
- which is currently a grab-bag of things the ``httreplay`` author
- has found useful, no matter how silly.
-
-    :param recording_file_name: The file from which to load and save replays.
-    :type recording_file_name: string
- :param url_key: Function that generates a stable key from a URL.
- :type url_key: function
- :param body_key: Function that generates a stable key from a
- request body.
- :type body_key: function
- :param headers_key: Function that generates a stable key from a
- dictionary of headers.
- :type headers_key: function
- """
- start_replay(
- recording_file_name,
- url_key=url_key,
- body_key=body_key,
- headers_key=headers_key)
-    try:
-        yield
-    finally:
-        # Undo the patches even if the body raises.
-        stop_replay()
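A concrete version of the docstring's example, assuming the requests library
is installed (the first run records to the file, later runs replay from it)::

    import requests
    from httreplay import replay

    with replay('/tmp/my_recording.json'):
        requests.get('http://example.com/')
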
diff --git a/python-packages/httreplay/patch.py b/python-packages/httreplay/patch.py
deleted file mode 100644
index c8485883e0..0000000000
--- a/python-packages/httreplay/patch.py
+++ /dev/null
@@ -1,168 +0,0 @@
-import httplib
-from .replay_settings import ReplaySettings
-from stubs.base import ReplayHTTPConnection, ReplayHTTPSConnection
-
-
-#------------------------------------------------------------------------------
-# Hold onto original objects for un-patching later
-#------------------------------------------------------------------------------
-
-_original_http_connection = httplib.HTTPConnection
-_original_https_connection = httplib.HTTPSConnection
-
-try:
- import requests
- import requests.packages.urllib3.connectionpool
- _original_requests_verified_https_connection = \
- requests.packages.urllib3.connectionpool.VerifiedHTTPSConnection
- _original_requests_http_connection = \
- requests.packages.urllib3.connectionpool.HTTPConnection
- if requests.__version__.startswith('2'):
- _original_requests_https_connection_pool_cls = \
- requests.packages.urllib3.connectionpool.HTTPSConnectionPool.ConnectionCls
- _original_requests_http_connection_pool_cls = \
- requests.packages.urllib3.connectionpool.HTTPConnectionPool.ConnectionCls
-except ImportError:
- pass
-
-try:
- import urllib3
- _original_urllib3_verified_https_connection = \
- urllib3.connectionpool.VerifiedHTTPSConnection
- _original_urllib3_http_connection = urllib3.connectionpool.HTTPConnection
-except ImportError:
- pass
-
-
-#------------------------------------------------------------------------------
-# Patching methods
-#------------------------------------------------------------------------------
-
-def _patch_httplib(settings):
- httplib.HTTPSConnection = httplib.HTTPS._connection_class = \
- ReplayHTTPSConnection
- httplib.HTTPSConnection._replay_settings = settings
- httplib.HTTPConnection = httplib.HTTP._connection_class = \
- ReplayHTTPConnection
- httplib.HTTPConnection._replay_settings = settings
-
-
-def _patch_requests(settings):
- try:
- import requests
- import requests.packages.urllib3.connectionpool
- from .stubs.requests_stubs import ReplayRequestsHTTPSConnection
- requests.packages.urllib3.connectionpool.VerifiedHTTPSConnection = \
- ReplayRequestsHTTPSConnection
- requests.packages.urllib3.connectionpool.VerifiedHTTPSConnection.\
- _replay_settings = settings
- requests.packages.urllib3.connectionpool.HTTPConnection = \
- ReplayHTTPConnection
- requests.packages.urllib3.connectionpool.HTTPConnection.\
- _replay_settings = settings
- if requests.__version__.startswith('2'):
- requests.packages.urllib3.connectionpool.HTTPConnectionPool.ConnectionCls = \
- ReplayHTTPConnection
- requests.packages.urllib3.connectionpool.HTTPConnectionPool.ConnectionCls.\
- _replay_settings = settings
- requests.packages.urllib3.connectionpool.HTTPSConnectionPool.ConnectionCls = \
- ReplayRequestsHTTPSConnection
- requests.packages.urllib3.connectionpool.HTTPSConnectionPool.ConnectionCls.\
- _replay_settings = settings
- except ImportError:
- pass
-
-
-def _patch_urllib3(settings):
- try:
- import urllib3.connectionpool
- from .stubs.urllib3_stubs import ReplayUrllib3HTTPSConnection
- urllib3.connectionpool.VerifiedHTTPSConnection = \
- ReplayUrllib3HTTPSConnection
- urllib3.connectionpool.VerifiedHTTPSConnection._replay_settings = \
- settings
- urllib3.connectionpool.HTTPConnection = ReplayHTTPConnection
- urllib3.connectionpool.HTTPConnection._replay_settings = settings
- except ImportError:
- pass
-
-
-def start_replay(replay_file_name, **kwargs):
- """
- Start using the ``httreplay`` library.
-
- Patches the various supported HTTP-requesting libraries
- (httplib, requests, urllib3) and starts reading from/writing
- to the replay file on disk.
-
- Because HTTP requests and responses may contain sensitive data,
- and because they may vary in inconsequential ways that you may
-    wish to ignore, ``httreplay`` provides several hooks to "filter"
- the request contents to generate a stable key suitable for your
- needs. Some example "filters" may be found in the ``utils.py`` file,
- which is currently a grab-bag of things the ``httreplay`` author
- has found useful, no matter how silly.
-
- :param replay_file_name: The file from which to load and save replays.
- :type replay_file_name: string
- :param url_key: Function that generates a stable key from a URL.
- :type url_key: function
- :param body_key: Function that generates a stable key from a
- request body.
- :type body_key: function
- :param headers_key: Function that generates a stable key from a
- dictionary of headers.
- :type headers_key: function
- """
- settings = ReplaySettings(replay_file_name, **kwargs)
- _patch_httplib(settings)
- _patch_requests(settings)
- _patch_urllib3(settings)
-
-
-#------------------------------------------------------------------------------
-# Un-patching methods
-#------------------------------------------------------------------------------
-
-def _unpatch_httplib():
- httplib.HTTPSConnection = httplib.HTTPS._connection_class = \
- _original_https_connection
- httplib.HTTPConnection = httplib.HTTP._connection_class = \
- _original_http_connection
-
-
-def _unpatch_requests():
- try:
- import requests
- import requests.packages.urllib3.connectionpool
- requests.packages.urllib3.connectionpool.VerifiedHTTPSConnection = \
- _original_requests_verified_https_connection
- requests.packages.urllib3.connectionpool.HTTPConnection = \
- _original_requests_http_connection
- if requests.__version__.startswith('2'):
- requests.packages.urllib3.connectionpool.HTTPSConnectionPool.ConnectionCls = \
- _original_requests_https_connection_pool_cls
- requests.packages.urllib3.connectionpool.HTTPConnectionPool.ConnectionCls = \
- _original_requests_http_connection_pool_cls
- except ImportError:
- pass
-
-
-def _unpatch_urllib3():
- try:
- import urllib3.connectionpool
- urllib3.connectionpool.VerifiedHTTPSConnection = \
- _original_urllib3_verified_https_connection
- urllib3.connectionpool.HTTPConnection = \
- _original_urllib3_http_connection
- except ImportError:
- pass
-
-
-def stop_replay():
- """
- Remove all patches installed by the ``httreplay`` library and end replay.
- """
- _unpatch_httplib()
- _unpatch_requests()
- _unpatch_urllib3()
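The same patching can also be driven without the context manager, e.g. from
unittest setUp/tearDown; a minimal sketch (the test class is illustrative)::

    import unittest
    from httreplay import start_replay, stop_replay

    class ExampleTest(unittest.TestCase):
        def setUp(self):
            start_replay('/tmp/my_recording.json')

        def tearDown(self):
            stop_replay()
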
diff --git a/python-packages/httreplay/recording.py b/python-packages/httreplay/recording.py
deleted file mode 100644
index f9d5cd0558..0000000000
--- a/python-packages/httreplay/recording.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import os
-import json
-import logging
-
-
-logger = logging.getLogger(__name__)
-
-
-class ReplayRecording(object):
- """
- Holds on to a set of request keys and their response values.
- Can be used to reproduce HTTP/HTTPS responses without using
- the network.
- """
- def __init__(self, jsonable=None):
- self.request_responses = []
- if jsonable:
- self._from_jsonable(jsonable)
-
- def _from_jsonable(self, jsonable):
- self.request_responses = [
- (r['request'], r['response']) for r in jsonable ]
-
- def to_jsonable(self):
- return [dict(request=request, response=response)
- for request, response in self.request_responses]
-
- def __contains__(self, request):
- return any(rr[0] == request for rr in self.request_responses)
-
- def __getitem__(self, request):
- try:
- return next(rr[1] for rr in self.request_responses if rr[0] == request)
- except StopIteration:
- raise KeyError
-
- def __setitem__(self, request, response):
- self.request_responses.append((request, response))
-
- def get(self, request, default=None):
- try:
- return self[request]
- except KeyError:
- return default
-
-
-class ReplayRecordingManager(object):
- """
-    Loads and saves replay recordings to JSON files.
- """
- @classmethod
- def load(cls, recording_file_name):
- try:
- with open(recording_file_name) as recording_file:
- recording = ReplayRecording(json.load(
- recording_file,
- cls=RequestResponseDecoder))
- except IOError:
- logger.debug("ReplayRecordingManager starting new %r",
- os.path.basename(recording_file_name))
- recording = ReplayRecording()
- else:
- logger.debug("ReplayRecordingManager loaded from %r",
- os.path.basename(recording_file_name))
- return recording
-
- @classmethod
- def save(cls, recording, recording_file_name):
- logger.debug("ReplayRecordingManager saving to %r",
- os.path.basename(recording_file_name))
- dirname, _ = os.path.split(recording_file_name)
-        if dirname and not os.path.exists(dirname):
- os.makedirs(dirname)
- with open(recording_file_name, 'w') as recording_file:
- json.dump(
- recording.to_jsonable(),
- recording_file,
- indent=4,
- sort_keys=True,
- cls=RequestResponseEncoder)
-
-
-class RequestResponseDecoder(json.JSONDecoder):
- def __init__(self, *args, **kwargs):
- kwargs['object_hook'] = self.object_hook
- super(RequestResponseDecoder, self).__init__(*args, **kwargs)
-
- @staticmethod
- def object_hook(d):
- if len(d) == 2 and set(d) == set(['__type__', '__data__']):
- modname = d['__type__'].rsplit('.', 1)[0]
- cls = __import__(modname)
- for attr in d['__type__'].split('.')[1:]:
- cls = getattr(cls, attr)
- d = cls(d['__data__'])
- return d
-
-
-class RequestResponseEncoder(json.JSONEncoder):
- def default(self, obj):
- try:
- from requests.structures import CaseInsensitiveDict
- except ImportError:
- pass
- else:
- if isinstance(obj, CaseInsensitiveDict):
- return {
- '__type__': 'requests.structures.CaseInsensitiveDict',
- '__data__': obj._store,
- }
-
- # Let the base class default method raise the TypeError
- return json.JSONEncoder.default(self, obj)
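ReplayRecording behaves like an append-only mapping keyed by JSON-able request
dicts; a sketch of the round trip (the request/response dicts here are
illustrative, not a fixed schema)::

    from httreplay.recording import ReplayRecording, ReplayRecordingManager

    recording = ReplayRecording()
    request = {'method': 'GET', 'url': '/ping'}  # illustrative key
    recording[request] = {'status': {'code': 200, 'message': 'OK'}}
    assert request in recording
    ReplayRecordingManager.save(recording, '/tmp/my_recording.json')
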
diff --git a/python-packages/httreplay/replay_settings.py b/python-packages/httreplay/replay_settings.py
deleted file mode 100644
index fa801549c7..0000000000
--- a/python-packages/httreplay/replay_settings.py
+++ /dev/null
@@ -1,34 +0,0 @@
-class ReplaySettings(object):
- """Captures settings for the current replay session."""
- def __init__(self, replay_file_name, url_key=None, body_key=None,
- headers_key=None, allow_network=True):
- """
- Configure the ``httreplay`` library.
-
- Because HTTP requests and responses may contain sensitive data,
- and because they may vary in inconsequential ways that you may
-        wish to ignore, ``httreplay`` provides several hooks to "filter"
- the request contents to generate a stable key suitable for your
- needs. Some example "filters" may be found in the ``utils.py`` file,
- which is currently a grab-bag of things the ``httreplay`` author
- has found useful, no matter how silly.
-
- :param replay_file_name: The file from which to load and save replays.
- :type replay_file_name: string
- :param url_key: Function that generates a stable key from a URL.
- :type url_key: function
- :param body_key: Function that generates a stable key from a
- request body.
- :type body_key: function
- :param headers_key: Function that generates a stable key from a
- dictionary of headers.
- :type headers_key: function
- :param allow_network: Whether to allow outbound network calls in
- the absence of saved data. Defaults to True.
- :type allow_network: boolean
- """
- self.replay_file_name = replay_file_name
- self.url_key = url_key
- self.body_key = body_key
- self.headers_key = headers_key
- self.allow_network = allow_network
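Note that ``allow_network`` is accepted through ``start_replay``'s kwargs; when
it is False and a request has no saved response, the stubs answer with a
synthetic HTTP 418 reply instead of touching the network (see getresponse() in
stubs/base.py below). A sketch::

    from httreplay import start_replay

    # Any request missing from the recording now fails fast with a
    # replayed 418 response rather than going out over the network.
    start_replay('/tmp/my_recording.json', allow_network=False)
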
diff --git a/python-packages/httreplay/stubs/__init__.py b/python-packages/httreplay/stubs/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/httreplay/stubs/base.py b/python-packages/httreplay/stubs/base.py
deleted file mode 100644
index 4794d548e1..0000000000
--- a/python-packages/httreplay/stubs/base.py
+++ /dev/null
@@ -1,295 +0,0 @@
-from httplib import HTTPConnection, HTTPSConnection, HTTPMessage
-from cStringIO import StringIO
-import logging
-import quopri
-import zlib
-
-from ..recording import ReplayRecordingManager
-
-
-logger = logging.getLogger(__name__)
-
-
-class ReplayError(Exception):
- """Generic error base class for the httreplay library."""
- pass
-
-
-class ReplayConnectionHelper:
- """
- Mixin that provides the ability to serialize and deserialize
- requests and responses into a recording.
- """
- def __init__(self):
- self.__fake_send = False
- self.__recording_data = None
-
- # Some hacks to manage the presence (or not) of the connection's
- # socket. Requests 2.x likes to set settings on the socket, but
- # only checks whether the connection hasattr('sock') -- not whether
- # the sock itself is None (which is actually its default value,
- # and which httplib likes to see.) Yeesh.
- def __socket_del(self):
- if hasattr(self, 'sock') and (self.sock is None):
- del self.sock
-
- def __socket_none(self):
- if not hasattr(self, 'sock'):
- self.sock = None
-
- @property
- def __recording(self):
- """Provide the current recording, or create a new one if needed."""
- recording = self.__recording_data
- if not recording:
- recording = self.__recording_data = \
- ReplayRecordingManager.load(
- self._replay_settings.replay_file_name)
- return recording
-
- # All httplib requests use the sequence putrequest(), putheader(),
- # then endheaders() -> _send_output() -> send()
-
- def putrequest(self, method, url, **kwargs):
- self.__socket_none()
- # Store an incomplete request; this will be completed when
- # endheaders() is called.
- self.__request = dict(
- method=method,
- _url=url,
- _headers={},
- )
- return self._baseclass.putrequest(self, method, url, **kwargs)
-
- def putheader(self, header, *values):
- self.__socket_none()
- # Always called after putrequest() so the dict is prepped.
- val = self.__request['_headers'].get(header)
- # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
- val = '' if val is None else val + ','
- val += '\r\n\t'.join(values)
- self.__request['_headers'][header] = val
- return self._baseclass.putheader(self, header, *values)
-
- def endheaders(self, message_body=None):
- self.__socket_del()
- # If a key generator for the URL is provided, use it.
- # Otherwise, simply use the URL itself as the URL key.
- url = self.__request.pop('_url')
- if self._replay_settings.url_key:
- url_key = self._replay_settings.url_key(url)
- else:
- url_key = url
-
- # If a key generator for the headers is provided, use it.
- # Otherwise, simply use the headers directly.
- headers = self.__request.pop('_headers')
- if self._replay_settings.headers_key:
- headers_key = self._replay_settings.headers_key(headers)
- else:
- headers_key = headers
-
- # message_body can be a file; handle that before generating
- # body_key
- if message_body and callable(getattr(message_body, 'read', None)):
- body_content = message_body.read()
- message_body = StringIO(body_content) # for continuity
- else:
- body_content = message_body
-
- # If a key generator for the body is provided, use it.
- # Otherwise, simply use the body itself as the body key.
- if body_content is not None and self._replay_settings.body_key:
- body_key = self._replay_settings.body_key(body_content)
- else:
- body_key = body_content
-
- self.__request.update(dict(
- # method already present
- url=url_key,
- headers=headers_key,
- body=body_key,
- host=self.host,
- port=self.port,
- ))
-
- # endheaders() will eventually call send()
- logstr = '%(method)s %(host)s:%(port)s/%(url)s' % self.__request
- if self.__request in self.__recording:
- logger.debug("ReplayConnectionHelper found %s", logstr)
- self.__fake_send = True
- else:
- logger.debug("ReplayConnectionHelper trying %s", logstr)
- # result = self._baseclass.endheaders(self, message_body)
- result = self._baseclass.endheaders(self)
- self.__fake_send = False
- return result
-
- def send(self, msg):
- if not self.__fake_send:
- self.__socket_none()
- return self._baseclass.send(self, msg)
-
- def getresponse(self, buffering=False):
- """
- Provide a response from the current recording if possible.
- Otherwise, perform the network request. This method ALWAYS
- returns a ReplayHTTPResponse either way, so behavior is consistent
- between the initial recording run and later replay runs.
- """
- self.__socket_none()
- replay_response = self.__recording.get(self.__request)
-
- if replay_response:
- # Not calling the underlying getresponse(); do the same cleanup
- # that it would have done. However since the cleanup is on
- # class-specific members (self.__state and self.__response) this
- # is the easiest way.
- self.close()
-
- elif self._replay_settings.allow_network:
- logger.debug("ReplayConnectionHelper calling %s.getresponse()", self._baseclass.__name__)
-
- response = self._baseclass.getresponse(self)
- replay_response = ReplayHTTPResponse.make_replay_response(response)
- self.__recording[self.__request] = replay_response
- ReplayRecordingManager.save(
- self.__recording,
- self._replay_settings.replay_file_name)
-
- else:
- logger.debug("ReplayConnectionHelper 418 (allow_network=False)")
-
- replay_response = dict(
- status=dict(code=418, message="I'm a teapot"),
- headers={},
- body_quoted_printable='Blocked by allow_network=3DFalse')
-
- return ReplayHTTPResponse(replay_response, method=self.__request['method'])
-
- def close(self):
- self.__socket_none()
- self._baseclass.close(self)
-
-
-class ReplayHTTPConnection(ReplayConnectionHelper, HTTPConnection):
- """Generic HTTPConnection with replay."""
- _baseclass = HTTPConnection
-
- def __init__(self, *args, **kwargs):
- HTTPConnection.__init__(self, *args, **kwargs)
- ReplayConnectionHelper.__init__(self)
-
-
-class ReplayHTTPSConnection(ReplayConnectionHelper, HTTPSConnection):
- """Generic HTTPSConnection with replay."""
- _baseclass = HTTPSConnection
-
- def __init__(self, *args, **kwargs):
- # I overrode the init and copied a lot of the code from the parent
- # class because when this happens, HTTPConnection has been replaced
- # by ReplayHTTPConnection, but doing it here lets us use the original
- # one.
- HTTPConnection.__init__(self, *args, **kwargs)
- ReplayConnectionHelper.__init__(self)
- self.key_file = kwargs.pop('key_file', None)
- self.cert_file = kwargs.pop('cert_file', None)
-
-
-class ReplayHTTPResponse(object):
- """
- A replay response object, with just enough functionality to make
- the various HTTP/URL libraries out there happy.
- """
- __text_content_types = (
- 'text/',
- 'application/json',
- )
-
- def __init__(self, replay_response, method=None):
- self.reason = replay_response['status']['message']
- self.status = replay_response['status']['code']
- self.version = None
- if 'body_quoted_printable' in replay_response:
- self._content = quopri.decodestring(replay_response['body_quoted_printable'])
- else:
- self._content = replay_response['body'].decode('base64')
- self.fp = StringIO(self._content)
-
- msg_fp = StringIO('\r\n'.join('{}: {}'.format(h, v)
- for h, v in replay_response['headers'].iteritems()))
- self.msg = HTTPMessage(msg_fp)
- self.msg.fp = None # httplib does this, okay?
-
- length = self.msg.getheader('content-length')
- self.length = int(length) if length else None
-
- # Save method to handle HEAD specially as httplib does
- self._method = method
-
- @classmethod
- def make_replay_response(cls, response):
- """
- Converts real response to replay_response dict which can be saved
- and/or used to initialize a ReplayHTTPResponse.
- """
- replay_response = {}
- body = response.read() # undecoded byte string
-
- # Add body to replay_response, either as quoted printable for
- # text responses or base64 for binary responses.
- if response.getheader('content-type', '') \
- .startswith(cls.__text_content_types):
- if response.getheader('content-encoding') in ['gzip', 'deflate']:
- # http://stackoverflow.com/questions/2695152
- body = zlib.decompress(body, 16 + zlib.MAX_WBITS)
- del response.msg['content-encoding']
- # decompression changes the length
- if 'content-length' in response.msg:
- response.msg['content-length'] = str(len(body))
- replay_response['body_quoted_printable'] = quopri.encodestring(body)
- else:
- replay_response['body'] = body.encode('base64')
-
- replay_response.update(dict(
- status=dict(code=response.status, message=response.reason),
- headers=dict(response.getheaders())))
- return replay_response
-
- def close(self):
- self.fp = None
-
- def isclosed(self):
- return self.fp is None
-
- def read(self, amt=None):
- """
- The important parts of HTTPResponse.read()
- """
- if self.fp is None:
- return ''
-
- if self._method == 'HEAD':
- self.close()
- return ''
-
- if self.length is not None:
- amt = min(amt, self.length)
-
- # StringIO doesn't like read(None)
- s = self.fp.read() if amt is None else self.fp.read(amt)
- if not s:
- self.close()
-
- if self.length is not None:
- self.length -= len(s)
- if not self.length:
- self.close()
-
- return s
-
- def getheader(self, name, default=None):
- return self.msg.getheader(name, default)
-
- def getheaders(self):
- return self.msg.items()
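
For context, a sketch of how these classes get used: the mixin reads `self._replay_settings`, so a patching helper (not part of this diff; `install_replay` is a hypothetical name) would attach the settings at class level and swap httplib's connection classes:

```python
import httplib  # Python 2

from httreplay.stubs.base import ReplayHTTPConnection, ReplayHTTPSConnection

def install_replay(settings):
    # Hypothetical helper: attach settings where the mixin looks for them,
    # then route all httplib traffic through the replay classes.
    ReplayHTTPConnection._replay_settings = settings
    ReplayHTTPSConnection._replay_settings = settings
    httplib.HTTPConnection = ReplayHTTPConnection
    httplib.HTTPSConnection = ReplayHTTPSConnection
```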
diff --git a/python-packages/httreplay/stubs/requests_stubs.py b/python-packages/httreplay/stubs/requests_stubs.py
deleted file mode 100644
index 31c52cae60..0000000000
--- a/python-packages/httreplay/stubs/requests_stubs.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from requests.packages.urllib3.connectionpool import VerifiedHTTPSConnection
-from .base import ReplayHTTPSConnection
-
-
-class ReplayRequestsHTTPSConnection(
- ReplayHTTPSConnection, VerifiedHTTPSConnection):
- _baseclass = VerifiedHTTPSConnection
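
The requests-specific stub exists because requests bundles its own copy of urllib3. A hedged sketch of the corresponding patch point (older requests releases exposed the connection class on `connectionpool`; the exact wiring is an assumption):

```python
from requests.packages.urllib3 import connectionpool

from httreplay.stubs.requests_stubs import ReplayRequestsHTTPSConnection

# Route HTTPS connections opened by requests through the replay stub.
connectionpool.VerifiedHTTPSConnection = ReplayRequestsHTTPSConnection
```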
diff --git a/python-packages/httreplay/stubs/urllib3_stubs.py b/python-packages/httreplay/stubs/urllib3_stubs.py
deleted file mode 100644
index 822a68c7a7..0000000000
--- a/python-packages/httreplay/stubs/urllib3_stubs.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from urllib3.connectionpool import VerifiedHTTPSConnection
-from .base import ReplayHTTPSConnection
-
-
-class ReplayUrllib3HTTPSConnection(
- ReplayHTTPSConnection, VerifiedHTTPSConnection):
- _baseclass = VerifiedHTTPSConnection
diff --git a/python-packages/httreplay/utils.py b/python-packages/httreplay/utils.py
deleted file mode 100644
index fe0b38cb30..0000000000
--- a/python-packages/httreplay/utils.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import urllib
-import urlparse
-
-
-def sort_string(s):
- """A simple little toy to sort a string."""
- return ''.join(sorted(list(s))) if s else s
-
-
-def sort_string_key():
- """Returns a key function that produces a key by sorting a string."""
- return sort_string
-
-
-def filter_query_params(url, remove_params):
- """
- Remove all provided parameters from the query section of the ``url``.
-
- :param remove_params: A list of parameter names, or (param, newvalue)
- pairs, to scrub from the URL; bare names are removed outright, while
- pairs have their value replaced by newvalue.
- :type remove_params: list
- """
- if not url:
- return url
-
- remove_params = dict((p, None) if isinstance(p, basestring) else p
- for p in remove_params)
-
- parsed_url = urlparse.urlparse(url)
- parsed_qsl = urlparse.parse_qsl(parsed_url.query, keep_blank_values=True)
-
- filtered_qsl = [(p, remove_params.get(p, v)) for p, v in parsed_qsl]
- filtered_qsl = [(p, v) for p, v in filtered_qsl if v is not None]
-
- filtered_url = urlparse.ParseResult(
- scheme=parsed_url.scheme,
- netloc=parsed_url.netloc,
- path=parsed_url.path,
- params=parsed_url.params,
- query=urllib.urlencode(filtered_qsl),
- fragment=parsed_url.fragment)
-
- return urlparse.urlunparse(filtered_url)
-
-
-def filter_query_params_key(remove_params):
- """
- Returns a key function that produces a key by removing params from a URL.
-
- :param remove_params: A list of query params to scrub from provided URLs.
- :type remove_params: list
- """
- def filter(url):
- return filter_query_params(url, remove_params)
- return filter
-
-
-def filter_headers(headers, remove_headers):
- """
- Remove undesired headers from the provided ``headers`` dict.
- The header keys are case-insensitive.
-
- :param remove_headers: A list of header names to remove or redact.
- :type remove_headers: list
- """
- # Upgrade bare 'header' to ('header', None) in remove_headers
- remove_headers = [(h, None) if isinstance(h, basestring) else h
- for h in remove_headers]
-
- # Make remove_headers a dict with lower-cased keys
- remove_headers = dict((h.lower(), v) for h, v in remove_headers)
-
- # Replace values in headers with values from remove_headers
- headers = dict((h, remove_headers.get(h.lower(), v))
- for h, v in headers.items())
-
- # Remove any that ended up None
- headers = dict((h, v) for h, v in headers.items() if v is not None)
- return headers
-
-
-def filter_headers_key(remove_headers):
- """
- Returns a key function that produces a key by removing headers from a dict.
-
- :param remove_headers: A list of header names to remove.
- :type remove_headers: list
- """
- def filter(headers):
- return filter_headers(headers, remove_headers)
- return filter
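
A short example of the two key-function factories above in action (illustrative values; dict ordering in the printed result may vary):

```python
from httreplay.utils import filter_query_params_key, filter_headers_key

url_key = filter_query_params_key(['api_key', ('ts', 'TS')])
print url_key('http://example.com/a?api_key=secret&ts=123&q=1')
# -> http://example.com/a?ts=TS&q=1

headers_key = filter_headers_key([('Authorization', 'REDACTED'), 'Cookie'])
print headers_key({'Authorization': 'Bearer abc', 'Cookie': 'x', 'Accept': '*/*'})
# -> {'Authorization': 'REDACTED', 'Accept': '*/*'}
```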
diff --git a/python-packages/importlib/__init__.py b/python-packages/importlib/__init__.py
deleted file mode 100644
index ad31a1ac47..0000000000
--- a/python-packages/importlib/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""Backport of importlib.import_module from 3.x."""
-# While not critical (and in no way guaranteed!), it would be nice to keep this
-# code compatible with Python 2.3.
-import sys
-
-def _resolve_name(name, package, level):
- """Return the absolute name of the module to be imported."""
- if not hasattr(package, 'rindex'):
- raise ValueError("'package' not set to a string")
- dot = len(package)
- for x in xrange(level, 1, -1):
- try:
- dot = package.rindex('.', 0, dot)
- except ValueError:
- raise ValueError("attempted relative import beyond top-level "
- "package")
- return "%s.%s" % (package[:dot], name)
-
-
-def import_module(name, package=None):
- """Import a module.
-
- The 'package' argument is required when performing a relative import. It
- specifies the package to use as the anchor point from which to resolve the
- relative import to an absolute import.
-
- """
- if name.startswith('.'):
- if not package:
- raise TypeError("relative imports require the 'package' argument")
- level = 0
- for character in name:
- if character != '.':
- break
- level += 1
- name = _resolve_name(name[level:], package, level)
- __import__(name)
- return sys.modules[name]
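
For example, the backport handles both absolute and relative imports; one leading dot gives level 1, which `_resolve_name` anchors at the supplied package:

```python
from importlib import import_module

json_mod = import_module('json')  # absolute import

# Relative import: '.utils' with package='httreplay' resolves to
# 'httreplay.utils' (assuming that package imports cleanly).
utils = import_module('.utils', package='httreplay')
```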
diff --git a/python-packages/iso8601/LICENSE b/python-packages/iso8601/LICENSE
deleted file mode 100644
index 5ca93dae79..0000000000
--- a/python-packages/iso8601/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2007 Michael Twomey
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/python-packages/iso8601/README b/python-packages/iso8601/README
deleted file mode 100644
index 5ec9d45597..0000000000
--- a/python-packages/iso8601/README
+++ /dev/null
@@ -1,26 +0,0 @@
-A simple package to deal with ISO 8601 date time formats.
-
-ISO 8601 defines a neutral, unambiguous date string format, which also
-has the property of sorting naturally.
-
-e.g. YYYY-MM-DDTHH:MM:SSZ or 2007-01-25T12:00:00Z
-
-Currently this covers only the most common date formats encountered; not
-all of ISO 8601 is handled.
-
-Currently the following formats are handled:
-
-* 2006-01-01T00:00:00Z
-* 2006-01-01T00:00:00[+-]00:00
-
-I'll add more as I encounter them in my day to day life. Patches with
-new formats and tests will be gratefully accepted of course :)
-
-References:
-
-* http://www.cl.cam.ac.uk/~mgk25/iso-time.html - simple overview
-
-* http://hydracen.com/dx/iso8601.htm - more detailed enumeration of
- valid formats.
-
-See the LICENSE file for the license this package is released under.
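
For instance, both supported forms parse to timezone-aware datetimes:

```python
import iso8601

iso8601.parse_date("2007-01-25T12:00:00Z")       # UTC
iso8601.parse_date("2007-01-25T12:00:00+02:00")  # fixed +02:00 offset
```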
diff --git a/python-packages/iso8601/__init__.py b/python-packages/iso8601/__init__.py
deleted file mode 100644
index e72e3563bc..0000000000
--- a/python-packages/iso8601/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from iso8601 import *
diff --git a/python-packages/iso8601/iso8601.py b/python-packages/iso8601/iso8601.py
deleted file mode 100644
index f923938b2d..0000000000
--- a/python-packages/iso8601/iso8601.py
+++ /dev/null
@@ -1,102 +0,0 @@
-"""ISO 8601 date time string parsing
-
-Basic usage:
->>> import iso8601
->>> iso8601.parse_date("2007-01-25T12:00:00Z")
-datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.Utc object at 0x...>)
->>>
-
-"""
-
-from datetime import datetime, timedelta, tzinfo
-import re
-
-__all__ = ["parse_date", "ParseError"]
-
-# Adapted from http://delete.me.uk/2005/03/iso8601.html
-ISO8601_REGEX = re.compile(r"(?P<year>[0-9]{4})(-(?P<month>[0-9]{1,2})(-(?P<day>[0-9]{1,2})"
- r"((?P<separator>.)(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2})(:(?P<second>[0-9]{2})(\.(?P<fraction>[0-9]+))?)?"
- r"(?P<timezone>Z|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?"
-)
-TIMEZONE_REGEX = re.compile("(?P<prefix>[+-])(?P<hours>[0-9]{2}).(?P<minutes>[0-9]{2})")
-
-class ParseError(Exception):
- """Raised when there is a problem parsing a date string"""
-
-# Yoinked from python docs
-ZERO = timedelta(0)
-class Utc(tzinfo):
- """UTC
-
- """
- def utcoffset(self, dt):
- return ZERO
-
- def tzname(self, dt):
- return "UTC"
-
- def dst(self, dt):
- return ZERO
-UTC = Utc()
-
-class FixedOffset(tzinfo):
- """Fixed offset in hours and minutes from UTC
-
- """
- def __init__(self, offset_hours, offset_minutes, name):
- self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes)
- self.__name = name
-
- def utcoffset(self, dt):
- return self.__offset
-
- def tzname(self, dt):
- return self.__name
-
- def dst(self, dt):
- return ZERO
-
- def __repr__(self):
- return "<FixedOffset %r>" % self.__name
-
-def parse_timezone(tzstring, default_timezone=UTC):
- """Parses ISO 8601 time zone specs into tzinfo offsets
-
- """
- if tzstring == "Z":
- return default_timezone
- # This isn't strictly correct, but it's common to encounter dates without
- # timezones so I'll assume the default (which defaults to UTC).
- # Addresses issue 4.
- if tzstring is None:
- return default_timezone
- m = TIMEZONE_REGEX.match(tzstring)
- prefix, hours, minutes = m.groups()
- hours, minutes = int(hours), int(minutes)
- if prefix == "-":
- hours = -hours
- minutes = -minutes
- return FixedOffset(hours, minutes, tzstring)
-
-def parse_date(datestring, default_timezone=UTC):
- """Parses ISO 8601 dates into datetime objects
-
- The timezone is parsed from the date string. However it is quite common to
- have dates without a timezone (not strictly correct). In this case the
- default timezone specified in default_timezone is used. This is UTC by
- default.
- """
- if not isinstance(datestring, basestring):
- raise ParseError("Expecting a string %r" % datestring)
- m = ISO8601_REGEX.match(datestring)
- if not m:
- raise ParseError("Unable to parse date string %r" % datestring)
- groups = m.groupdict()
- tz = parse_timezone(groups["timezone"], default_timezone=default_timezone)
- if groups["fraction"] is None:
- groups["fraction"] = 0
- else:
- groups["fraction"] = int(float("0.%s" % groups["fraction"]) * 1e6)
- return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]),
- int(groups["hour"]), int(groups["minute"]), int(groups["second"]),
- int(groups["fraction"]), tz)
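
To make the default-timezone fallback concrete:

```python
from iso8601 import parse_date, FixedOffset, UTC

d = parse_date("2007-01-01T08:00:00")  # no timezone in the string
assert d.tzinfo is UTC                 # falls back to the default

tz = FixedOffset(2, 0, "+02:00")
d2 = parse_date("2007-01-01T08:00:00", default_timezone=tz)
assert d2.tzinfo is tz
```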
diff --git a/python-packages/iso8601/test_iso8601.py b/python-packages/iso8601/test_iso8601.py
deleted file mode 100644
index ff9e2731cf..0000000000
--- a/python-packages/iso8601/test_iso8601.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import iso8601
-
-def test_iso8601_regex():
- assert iso8601.ISO8601_REGEX.match("2006-10-11T00:14:33Z")
-
-def test_timezone_regex():
- assert iso8601.TIMEZONE_REGEX.match("+01:00")
- assert iso8601.TIMEZONE_REGEX.match("+00:00")
- assert iso8601.TIMEZONE_REGEX.match("+01:20")
- assert iso8601.TIMEZONE_REGEX.match("-01:00")
-
-def test_parse_date():
- d = iso8601.parse_date("2006-10-20T15:34:56Z")
- assert d.year == 2006
- assert d.month == 10
- assert d.day == 20
- assert d.hour == 15
- assert d.minute == 34
- assert d.second == 56
- assert d.tzinfo == iso8601.UTC
-
-def test_parse_date_fraction():
- d = iso8601.parse_date("2006-10-20T15:34:56.123Z")
- assert d.year == 2006
- assert d.month == 10
- assert d.day == 20
- assert d.hour == 15
- assert d.minute == 34
- assert d.second == 56
- assert d.microsecond == 123000
- assert d.tzinfo == iso8601.UTC
-
-def test_parse_date_fraction_2():
- """From bug 6
-
- """
- d = iso8601.parse_date("2007-5-7T11:43:55.328Z'")
- assert d.year == 2007
- assert d.month == 5
- assert d.day == 7
- assert d.hour == 11
- assert d.minute == 43
- assert d.second == 55
- assert d.microsecond == 328000
- assert d.tzinfo == iso8601.UTC
-
-def test_parse_date_tz():
- d = iso8601.parse_date("2006-10-20T15:34:56.123+02:30")
- assert d.year == 2006
- assert d.month == 10
- assert d.day == 20
- assert d.hour == 15
- assert d.minute == 34
- assert d.second == 56
- assert d.microsecond == 123000
- assert d.tzinfo.tzname(None) == "+02:30"
- offset = d.tzinfo.utcoffset(None)
- assert offset.days == 0
- assert offset.seconds == 60 * 60 * 2.5
-
-def test_parse_invalid_date():
- try:
- iso8601.parse_date(None)
- except iso8601.ParseError:
- pass
- else:
- assert 1 == 2
-
-def test_parse_invalid_date2():
- try:
- iso8601.parse_date("23")
- except iso8601.ParseError:
- pass
- else:
- assert 1 == 2
-
-def test_parse_no_timezone():
- """issue 4 - Handle datetime string without timezone
-
- This tests what happens when you parse a date with no timezone. While not
- strictly correct this is quite common. I'll assume UTC for the time zone
- in this case.
- """
- d = iso8601.parse_date("2007-01-01T08:00:00")
- assert d.year == 2007
- assert d.month == 1
- assert d.day == 1
- assert d.hour == 8
- assert d.minute == 0
- assert d.second == 0
- assert d.microsecond == 0
- assert d.tzinfo == iso8601.UTC
-
-def test_parse_no_timezone_different_default():
- tz = iso8601.FixedOffset(2, 0, "test offset")
- d = iso8601.parse_date("2007-01-01T08:00:00", default_timezone=tz)
- assert d.tzinfo == tz
-
-def test_space_separator():
- """Handle a separator other than T
-
- """
- d = iso8601.parse_date("2007-06-23 06:40:34.00Z")
- assert d.year == 2007
- assert d.month == 6
- assert d.day == 23
- assert d.hour == 6
- assert d.minute == 40
- assert d.second == 34
- assert d.microsecond == 0
- assert d.tzinfo == iso8601.UTC
diff --git a/python-packages/khan_api_python/README.md b/python-packages/khan_api_python/README.md
deleted file mode 100644
index 3d2726da1d..0000000000
--- a/python-packages/khan_api_python/README.md
+++ /dev/null
@@ -1,59 +0,0 @@
-Khan Academy API (Python wrapper)
-=========
-
-This is a Python wrapper for the Khan Academy API.
-
-Documentation
-Khan Academy API: https://github.com/Khan/khan-api/wiki/Khan-Academy-API
-
-To use:
-
-In order to support multiple authentication sessions to the Khan Academy API, and different language settings, every call to the API is done through a Khan() session.
-
-```python
-from api_models import *
-```
-By default lang is set to "en", here we are setting it to Spanish.
-```python
-khan = Khan(lang="es")
-```
-Get entire Khan Academy topic tree
-```python
-topic_tree = khan.get_topic_tree()
-```
-Get information for a user - by default this is the user you are logged in as, but if you are a coach of other users, you can retrieve their information as well.
-If not already authenticated, this will create an OAuth authentication session which will need to be verified via the browser.
-```python
-current_user = khan.get_user()
-```
-
-Khan session object methods are available for most documented items in the API.
-
-```python
-khan.get_badge_category()
-khan.get_badges()
-khan.get_exercise("<exercise_id>")
-khan.get_exercises()
-khan.get_topic_exercises("<topic_slug>")
-khan.get_topic_videos("<topic_slug>")
-khan.get_topic_tree()
-khan.get_user("<user_id>")
-khan.get_video("<video_id>")
-khan.get_playlists()
-khan.get_playlist_exercises("<playlist_slug>")
-khan.get_playlist_videos("<playlist_slug>")
-```
-
-No authentication is required for anything but user data. In order to authenticate and retrieve user data, copy secrets.py.template to secrets.py and enter a CONSUMER_KEY and CONSUMER_SECRET.
-
-In addition to documented API endpoints, this wrapper also exposes the following functions.
-
-```python
-khan.get_videos()
-khan.get_assessment_item("<assessment_id>")
-khan.get_tags()
-```
-
-
-You can register your app with the Khan Academy API here to get these two items:
-https://www.khanacademy.org/api-apps/register
\ No newline at end of file
diff --git a/python-packages/khan_api_python/UPDATING.md b/python-packages/khan_api_python/UPDATING.md
deleted file mode 100644
index 967951bd12..0000000000
--- a/python-packages/khan_api_python/UPDATING.md
+++ /dev/null
@@ -1,10 +0,0 @@
-To update this library within the KA Lite repo, we use git subtree.
-
-First add the Khan Python API repo as a remote to your local git repository:
-```
-git remote add -f ka-api-py https://github.com/learningequality/khan-api-python.git
-```
-You can now update the repo with the following command:
-```
-git subtree pull --prefix=python-packages/khan_api_python ka-api-py master
-```
\ No newline at end of file
diff --git a/python-packages/khan_api_python/__init__.py b/python-packages/khan_api_python/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/khan_api_python/api_models.py b/python-packages/khan_api_python/api_models.py
deleted file mode 100644
index 8542532d31..0000000000
--- a/python-packages/khan_api_python/api_models.py
+++ /dev/null
@@ -1,719 +0,0 @@
-import requests
-import json
-import cgi
-import os
-import SocketServer
-import SimpleHTTPServer
-import sys
-import copy
-from decorator import decorator
-from functools import partial
-
-try:
- from secrets import CONSUMER_KEY, CONSUMER_SECRET
-except ImportError:
- CONSUMER_KEY = None
- CONSUMER_SECRET = None
-from test_oauth_client import TestOAuthClient
-from oauth import OAuthToken
-
-
-class APIError(Exception):
-
- """
- Custom Exception Class for returning meaningful errors which are caused by changes
- in the Khan Academy API.
- """
-
- def __init__(self, msg, obj=None):
- self.msg = msg
- self.obj = obj
-
- def __str__(self):
- inspection = ""
- if self.obj:
- for id in id_to_kind_map:
- if id(self.obj):
- inspection = "This occurred in an object of kind %s, called %s." % (
- id_to_kind_map[id], id(self.obj))
- if not inspection:
- inspection = "Object could not be inspected. Summary of object keys here: %s" % str(
- self.obj.keys())
- return "Khan API Error: %s %s" % (self.msg, inspection)
-
-
-def create_callback_server(session):
- """
- Adapted from https://github.com/Khan/khan-api/blob/master/examples/test_client/test.py
- Simple server to handle callbacks from OAuth request to browser.
- """
-
- class CallbackHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
-
- def do_GET(self):
-
- params = cgi.parse_qs(self.path.split(
- '?', 1)[1], keep_blank_values=False)
- session.REQUEST_TOKEN = OAuthToken(params['oauth_token'][
- 0], params['oauth_token_secret'][0])
- session.REQUEST_TOKEN.set_verifier(params['oauth_verifier'][0])
-
- self.send_response(200)
- self.send_header('Content-Type', 'text/plain')
- self.end_headers()
- self.wfile.write(
- 'OAuth request token fetched; you can close this window.')
-
- def log_request(self, code='-', size='-'):
- pass
-
- server = SocketServer.TCPServer(('127.0.0.1', 0), CallbackHandler)
- return server
-
-
-class AttrDict(dict):
-
- """
- Base class that exposes dictionary values from JSON objects as object
- properties. Recursively turns all dictionary sub-objects, and lists of
- dictionaries, into AttrDicts as well.
- """
-
- def __init__(self, *args, **kwargs):
- super(AttrDict, self).__init__(*args, **kwargs)
-
- def __getattr__(self, name):
- value = self[name]
- if isinstance(value, dict):
- value = AttrDict(value)
- if isinstance(value, list):
- for i in range(len(value)):
- if isinstance(value[i], dict):
- value[i] = AttrDict(value[i])
- return value
-
- def __setattr__(self, name, value):
- self[name] = value
-
-
-class APIModel(AttrDict):
-
- # _related_field_types = None # this is a dummy; do not use directly
-
- # _lazy_related_field_types = None # this is a dummy.
-
- # _API_attributes = None # this is also a dummy.
-
- def __getattr__(self, name):
- """
- Check to see if the attribute already exists in the object.
- If so, return that attribute according to super.
- If not, and the attribute is in API_attributes for this class,
- then make the appropriate API call to fetch the data, and set it
- into the object, so that repeated queries will not requery the API.
- """
- if name in self:
- if name.startswith("_"):
- return super(APIModel, self).__getattr__(name)
- if name in self._lazy_related_field_types or name in self._related_field_types:
- self._session.convert_items(name, self, loaded=(name in self._related_field_types))
- return self[name]
- else:
- return super(APIModel, self).__getattr__(name)
- if name in self._API_attributes:
- self[name] = api_call("v1", self.API_url(name), self._session)
- self._session.convert_items(name, self)
- return self[name]
- if not self._loaded and name not in self:
- self.fetch()
- if name in self._related_field_types:
- self._session.convert_items(name, self)
- return self[name]
- else:
- return super(APIModel, self).__getattr__(name)
-
- def __init__(self, *args, **kwargs):
-
- session = kwargs.get('session')
- loaded = kwargs.get('loaded', True)
- kwargs.pop('session', None)
- kwargs.pop('loaded', None)
- super(APIModel, self).__init__(*args, **kwargs)
- self._session = session
- self._loaded = loaded
- self._related_field_types = {}
- self._lazy_related_field_types = {}
- self._API_attributes = {}
-
- def API_url(self, name):
- """
- Generate the url from which to make API calls.
- """
- id = "/" + kind_to_id_map.get(self.kind)(
- self) if kind_to_id_map.get(self.kind) else ""
- get_param = "?" + get_key_to_get_param_map.get(kind_to_get_key_map.get(
- self.kind)) + "=" + self.get(kind_to_get_key_map.get(self.kind)) if kind_to_get_key_map.get(self.kind) else ""
- if self._session.lang:
- get_param = get_param + "&lang=" if get_param else "?lang="
- get_param += self._session.lang
- return self.base_url + id + self._API_attributes[name] + get_param
-
- def fetch(self):
- self.update(api_call(
- "v1", self.base_url + "/" + self[kind_to_id_map.get(type(self).__name__, "id")], self._session))
- self._loaded = True
-
- def toJSON(self):
- output = {}
- for key in self._related_field_types.keys() + self._lazy_related_field_types.keys():
- if self.get(key, None):
- if isinstance(self[key], APIModel):
- output[key] = self[key].toJSON()
- elif isinstance(self[key], dict):
- output[key] = json.dumps(self[key])
- elif isinstance(self[key], list):
- output[key] = []
- for i, item in enumerate(self[key]):
- if isinstance(self[key][i], APIModel):
- output[key].append(self[key][i].toJSON())
- elif isinstance(self[key][i], dict):
- output[key].append(json.dumps(self[key][i]))
- for key in self:
- if key not in self._related_field_types.keys() + self._lazy_related_field_types.keys():
- if not (key.startswith("_") or hasattr(self[key], '__call__')):
- output[key] = self[key]
- return json.dumps(output)
-
-def api_call(target_version, target_api_url, session, debug=False, authenticate=True):
- """
- Generic API call function, that will try to use an authenticated request if available,
- otherwise will fall back to non-authenticated request.
- """
- # TODO : Use requests for both kinds of authentication.
- # usage : api_call("v1", "/badges")
- resource_url = "/api/" + target_version + target_api_url
- try:
- if authenticate and session.REQUEST_TOKEN and session.ACCESS_TOKEN:
- client = TestOAuthClient(
- session.SERVER_URL, CONSUMER_KEY, CONSUMER_SECRET)
- response = client.access_resource(
- resource_url, session.ACCESS_TOKEN)
- else:
- response = requests.get(session.SERVER_URL + resource_url).content
- json_object = json.loads(response)
- except Exception as e:
- print e, "for target: %(target)s " % {"target": target_api_url}
- return {}
- if(debug):
- print json_object
- return json_object
-
-
-def n_deep(obj, names):
- """
- A function to descend len(names) levels in an object and retrieve the attribute there.
- """
- for name in names:
- try:
- obj = getattr(obj, name)
- except KeyError:
- raise APIError(
- "This object is missing the %s attribute." % name, obj)
- return obj
-
-
-class Khan():
-
- SERVER_URL = "http://www.khanacademy.org"
-
- # Set authorization objects to prevent errors when checking for Auth.
-
- def __init__(self, lang=None):
- self.lang = lang
- self.REQUEST_TOKEN = None
- self.ACCESS_TOKEN = None
-
- def require_authentication(self):
- """
- Decorator to require authentication for particular request events.
- """
- if not (self.REQUEST_TOKEN and self.ACCESS_TOKEN):
- print "This data requires authentication."
- self.authenticate()
- return (self.REQUEST_TOKEN and self.ACCESS_TOKEN)
-
- def authenticate(self):
- """
- Adapted from https://github.com/Khan/khan-api/blob/master/examples/test_client/test.py
- First pass at browser based OAuth authentication.
- """
- # TODO: Allow PIN access for non-browser enabled devices.
-
- if CONSUMER_KEY and CONSUMER_SECRET:
-
- server = create_callback_server(self)
-
- client = TestOAuthClient(
- self.SERVER_URL, CONSUMER_KEY, CONSUMER_SECRET)
-
- client.start_fetch_request_token(
- 'http://127.0.0.1:%d/' % server.server_address[1])
-
- server.handle_request()
-
- server.server_close()
-
- self.ACCESS_TOKEN = client.fetch_access_token(self.REQUEST_TOKEN)
- else:
- print "Consumer key and secret not set in secrets.py - authenticated access to API unavailable."
-
- def class_by_kind(self, node, session=None, loaded=True):
- """
- Function to turn a dictionary into a Python object of the appropriate kind,
- based on the "kind" attribute found in the dictionary.
- """
- # TODO: Fail better or prevent failure when "kind" is missing.
- try:
- return kind_to_class_map[node["kind"]](node, session=self, loaded=loaded)
- except KeyError:
- raise APIError(
- "This kind of object should have a 'kind' attribute.", node)
-
- def convert_list_to_classes(self, nodelist, session=None, class_converter=None, loaded=True):
- """
- Convert each element of the list (in-place) into an instance of a subclass of APIModel.
- You can pass a particular class to `class_converter` if you want to, or it will auto-select by kind.
- """
- if not class_converter:
- class_converter = self.class_by_kind
- for i in range(len(nodelist)):
- nodelist[i] = class_converter(nodelist[i], session=self, loaded=loaded)
-
- return nodelist # just for good measure; it's already been changed
-
- def class_by_name(self, node, name, session=None, loaded=True):
- """
- Function to turn a dictionary into a Python object of the kind given by name.
- """
- if isinstance(node, basestring):
- # Assume just an id has been supplied - otherwise there's not much we can do.
- node = {"id": node}
- if isinstance(node, dict):
- return kind_to_class_map[name](node, session=self, loaded=loaded)
- else:
- return node
-
- def convert_items(self, name, obj, loaded=True):
- """
- Convert attributes of an object to related object types.
- If the attribute is a list, convert each element of the list in turn.
- """
- class_converter = obj._related_field_types.get(name, None) or obj._lazy_related_field_types.get(name, None)
- # convert dicts to the related type
- if isinstance(obj[name], dict):
- obj[name] = class_converter(obj[name], session=self, loaded=loaded)
- # convert every item in related list to correct type
- elif isinstance(obj[name], list):
- self.convert_list_to_classes(obj[
- name], class_converter=class_converter, loaded=loaded)
-
- def params(self):
- if self.lang:
- return "?lang=" + self.lang
- else:
- return ""
-
- def get_exercises(self):
- """
- Return list of all exercises in the Khan API
- """
- return self.convert_list_to_classes(api_call("v1", Exercise.base_url + self.params(), self))
-
- def get_exercise(self, exercise_id):
- """
- Return particular exercise, by "exercise_id"
- """
- return Exercise(api_call("v1", Exercise.base_url + "/" + exercise_id + self.params(), self), session=self)
-
- def get_badges(self):
- """
- Return list of all badges in the Khan API
- """
- return self.convert_list_to_classes(api_call("v1", Badge.base_url + self.params(), self))
-
- def get_badge_category(self, category_id=None):
- """
- # Override __init__ directly instead of delegating to
- # HTTPSConnection.__init__: by the time this runs, HTTPConnection may
- # already have been replaced by ReplayHTTPConnection, and calling the
- # original directly avoids that. Pop key_file/cert_file before calling
- # HTTPConnection.__init__, which does not accept them.
- self.key_file = kwargs.pop('key_file', None)
- self.cert_file = kwargs.pop('cert_file', None)
- HTTPConnection.__init__(self, *args, **kwargs)
- ReplayConnectionHelper.__init__(self)
- """
- Download user data for a particular user.
- If no user specified, download logged in user's data.
- """
- if self.require_authentication():
- return User(api_call("v1", User.base_url + "?userId=" + user_id + self.params(), self), session=self)
-
- def get_topic_tree(self):
- """
- Retrieve the complete topic tree, from the root node on down.
- """
- return Topic(api_call("v1", "/topictree" + self.params(), self), session=self)
-
- def get_topic(self, topic_slug):
- """
- Retrieve complete topic at the specified topic_slug and descending.
- """
- return Topic(api_call("v1", Topic.base_url + "/" + topic_slug + self.params(), self), session=self)
-
- def get_topic_exercises(self, topic_slug):
- """
- This will return a list of exercises in the highest level of a topic.
- These are not lazily loaded via get_topic_tree, as any load of the topic data already includes them.
- """
- return self.convert_list_to_classes(api_call("v1", Topic.base_url + "/" + topic_slug + "/exercises" + self.params(), self))
-
- def get_topic_videos(self, topic_slug):
- """
- This will return a list of videos in the highest level of a topic.
- These are not lazily loaded via get_topic_tree, as any load of the topic data already includes them.
- """
- return self.convert_list_to_classes(api_call("v1", Topic.base_url + "/" + topic_slug + "/videos" + self.params(), self))
-
- def get_video(self, video_id):
- """
- Return particular video, by "readable_id" or "youtube_id" (deprecated)
- """
- return Video(api_call("v1", Video.base_url + "/" + video_id + self.params(), self), session=self)
-
- def get_videos(self):
- """
- Return list of all videos.
- As no API endpoint is provided for this by Khan Academy, this function fetches the topic tree,
- and recurses all the nodes in order to find all the videos in the topic tree.
- """
- topic_tree = self.get_topic_tree()
-
- video_nodes = {}
-
- def recurse_nodes(node):
- # Add the video to the video nodes
- kind = node["kind"]
-
- if kind == "Video" and node["id"] not in video_nodes:
- video_nodes[node["id"]] = node
-
- # Do the recursion
- for child in node.get("children", []):
- recurse_nodes(child)
- recurse_nodes(topic_tree)
-
- return self.convert_list_to_classes(video_nodes.values())
-
- def get_playlists(self):
- """
- Return list of all playlists in the Khan API
- """
- return self.convert_list_to_classes(api_call("v1", Playlist.base_url + self.params(), self))
-
- def get_playlist_exercises(self, topic_slug):
- """
- This will return a list of exercises in a playlist.
- """
- return self.convert_list_to_classes(api_call("v1", Playlist.base_url + "/" + topic_slug + "/exercises" + self.params(), self))
-
- def get_playlist_videos(self, topic_slug):
- """
- This will return a list of videos in the highest level of a playlist.
- """
- return self.convert_list_to_classes(api_call("v1", Playlist.base_url + "/" + topic_slug + "/videos" + self.params(), self))
-
- def get_assessment_item(self, assessment_id):
- """
- Return particular assessment item, by "assessment_id"
- """
- return AssessmentItem(api_call("v1", AssessmentItem.base_url + "/" + assessment_id + self.params(), self), session=self)
-
- def get_tags(self):
- """
- Return list of all assessment item tags in the Khan API
- """
- return self.convert_list_to_classes(api_call("v1", Tag.base_url + self.params(), self), class_converter=Tag)
-
-class Exercise(APIModel):
-
- base_url = "/exercises"
-
- _API_attributes = {
- "related_videos": "/videos",
- "followup_exercises": "/followup_exercises"
- }
-
- def __init__(self, *args, **kwargs):
-
- super(Exercise, self).__init__(*args, **kwargs)
- self._related_field_types = {
- "related_videos": partial(self._session.class_by_name, name="Video"),
- "followup_exercises": partial(self._session.class_by_name, name="Exercise"),
- "problem_types": partial(self._session.class_by_name, name="ProblemType"),
- }
- self._lazy_related_field_types = {
- "all_assessment_items": partial(self._session.class_by_name, name="AssessmentItem"),
- }
-
-
-class ProblemType(APIModel):
- def __init__(self, *args, **kwargs):
- super(ProblemType, self).__init__(*args, **kwargs)
- self._lazy_related_field_types = {
- "assessment_items": partial(self._session.class_by_name, name="AssessmentItem"),
- }
- if "items" in self:
- self.assessment_items = self["items"]
- del self["items"]
-
-class AssessmentItem(APIModel):
- """
- A class to lazily load assessment item data for Perseus Exercise questions.
- """
-
- base_url = "/assessment_items"
-
- def __init__(self, *args, **kwargs):
-
- super(AssessmentItem, self).__init__(*args, **kwargs)
-
-class Tag(APIModel):
- """
- A class for tags for Perseus Assessment Items.
- """
-
- base_url = "/assessment_items/tags"
-
-class Badge(APIModel):
-
- base_url = "/badges"
-
- def __init__(self, *args, **kwargs):
-
- super(Badge, self).__init__(*args, **kwargs)
-
- self._related_field_types = {
- "user_badges": self._session.class_by_kind,
- }
-
-
-class BadgeCategory(APIModel):
- pass
-
-
-class APIAuthModel(APIModel):
-
- def __getattr__(self, name):
- # Added to avoid infinite recursion during authentication
- if name == "_session":
- return super(APIAuthModel, self).__getattr__(name)
- elif self._session.require_authentication():
- return super(APIAuthModel, self).__getattr__(name)
-
- # TODO: Add API_url function to add "?userID=" + user_id to each item
- # Check that classes other than User have user_id field.
-
-
-class User(APIAuthModel):
-
- base_url = "/user"
-
- _API_attributes = {
- "videos": "/videos",
- "exercises": "/exercises",
- "students": "/students",
- }
-
- def __init__(self, *args, **kwargs):
-
- super(User, self).__init__(*args, **kwargs)
-
- self._related_field_types = {
- "videos": partial(self._session.class_by_name, name="UserVideo"),
- "exercises": partial(self._session.class_by_name, name="UserExercise"),
- "students": partial(self._session.class_by_name, name="User"),
- }
-
-
-class UserExercise(APIAuthModel):
-
- base_url = "/user/exercises"
-
- _API_attributes = {
- "log": "/log",
- "followup_exercises": "/followup_exercises",
- }
-
- def __init__(self, *args, **kwargs):
-
- super(UserExercise, self).__init__(*args, **kwargs)
-
- self._related_field_types = {
- "exercise_model": self._session.class_by_kind,
- "followup_exercises": self._session.class_by_kind,
- "log": partial(self._session.class_by_name, name="ProblemLog"),
- }
-
-
-class UserVideo(APIAuthModel):
- base_url = "/user/videos"
-
- _API_attributes = {
- "log": "/log",
- }
-
- def __init__(self, *args, **kwargs):
-
- super(UserVideo, self).__init__(*args, **kwargs)
-
- self._related_field_types = {
- "video": self._session.class_by_kind,
- "log": partial(self._session.class_by_name, name="VideoLog"),
- }
-
-
-class UserBadge(APIAuthModel):
- pass
-
-# ProblemLog and VideoLog API calls return multiple entities in a list
-
-
-class ProblemLog(APIAuthModel):
- pass
-
-
-class VideoLog(APIAuthModel):
- pass
-
-
-class Topic(APIModel):
-
- base_url = "/topic"
-
- def __init__(self, *args, **kwargs):
-
- super(Topic, self).__init__(*args, **kwargs)
-
- self._related_field_types = {
- "children": self._session.class_by_kind,
- }
-
-class Playlist(APIModel):
-
- base_url = "/playlists"
-
- def __init__(self, *args, **kwargs):
-
- super(Playlist, self).__init__(*args, **kwargs)
-
- self._related_field_types = {
- "children": self._session.class_by_kind,
- }
-
-
-class Separator(APIModel):
- pass
-
-
-class Scratchpad(APIModel):
- pass
-
-
-class Article(APIModel):
- pass
-
-
-class Video(APIModel):
-
- base_url = "/videos"
-
- _API_attributes = {"related_exercises": "/exercises"}
-
- def __init__(self, *args, **kwargs):
-
- super(Video, self).__init__(*args, **kwargs)
-
- self._related_field_types = {
- "related_exercises": self._session.class_by_kind,
- }
-
-
-# kind_to_class_map maps from the kinds of data found in the topic tree,
-# and other nested data structures to particular classes.
-# If Khan Academy adds any new types of data to the topic tree, this will
-# break the topic tree rendering.
-
-
-kind_to_class_map = {
- "Video": Video,
- "Exercise": Exercise,
- "Topic": Topic,
- "Separator": Separator,
- "Scratchpad": Scratchpad,
- "Article": Article,
- "User": User,
- "UserData": User,
- "UserBadge": UserBadge,
- "UserVideo": UserVideo,
- "UserExercise": UserExercise,
- "ProblemLog": ProblemLog,
- "VideoLog": VideoLog,
- "Playlist": Playlist,
- "ProblemType": ProblemType,
- "AssessmentItem": AssessmentItem,
- "AssessmentItemTag": Tag,
-}
-
-
-# Different API endpoints use different attributes as the id, depending on the kind of the item.
-# This map defines the id to use for API calls, depending on the kind of
-# the item.
-
-
-kind_to_id_map = {
- "Video": partial(n_deep, names=["readable_id"]),
- "Exercise": partial(n_deep, names=["name"]),
- "Topic": partial(n_deep, names=["slug"]),
- "Playlist": partial(n_deep, names=["slug"]),
- # "User": partial(n_deep, names=["user_id"]),
- # "UserData": partial(n_deep, names=["user_id"]),
- "UserExercise": partial(n_deep, names=["exercise"]),
- "UserVideo": partial(n_deep, names=["video", "youtube_id"]),
- "ProblemLog": partial(n_deep, names=["exercise"]),
- "VideoLog": partial(n_deep, names=["video_title"]),
-}
-
-kind_to_get_key_map = {
- "User": "user_id",
- "UserData": "user_id",
- "UserExercise": "user",
- "UserVideo": "user",
-}
-
-get_key_to_get_param_map = {
- "user_id": "userId",
- "user": "username",
-}
-
-id_to_kind_map = {value: key for key, value in kind_to_id_map.items()}
-
-if __name__ == "__main__":
- # print t.name
- # print t.children
- # print t.children[0].__class__
- # print t.children[1].__class__
- # print api_call("v1", "/videos");
- # print api_call("nothing");
- # Video.get_video("adding-subtracting-negative-numbers")
- # Video.get_video("C38B33ZywWs")
- Khan().get_topic_tree()
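
A brief sketch of the lazy loading above in action (live network access assumed; content endpoints need no authentication):

```python
from api_models import Khan

khan = Khan(lang="es")
exercise = khan.get_exercise("logarithms_1")

# First access of an _API_attributes key triggers a follow-up API call;
# the result is then cached on the object, so repeat access is free.
videos = exercise.related_videos

print exercise.toJSON()
```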
diff --git a/python-packages/khan_api_python/api_models_test.py b/python-packages/khan_api_python/api_models_test.py
deleted file mode 100644
index bb79f34d68..0000000000
--- a/python-packages/khan_api_python/api_models_test.py
+++ /dev/null
@@ -1,239 +0,0 @@
-import unittest
-from api_models import *
-
-class ApiCallExerciseTest(unittest.TestCase):
- """Performs an API call to fetch exercises and verifies the result.
-
- Attributes:
- exercises_list_object: A list of Exercise objects.
- exercise_object: An Exercise object that was specifically requested.
- """
-
- def setUp(self):
- """Prepares the objects that will be tested."""
- self.exercises_list_object = Khan().get_exercises()
- self.exercise_object = Khan().get_exercise("logarithms_1")
-
- def test_get_exercises(self):
- """Tests if the result is an empty list or if it is a list of Exercise objects."""
- if not self.exercises_list_object:
- self.assertListEqual(self.exercises_list_object, [])
- else:
- for obj in self.exercises_list_object:
- self.assertIsInstance(obj, Exercise)
-
- def test_get_exercise(self):
- """Tests if the result object contains the requested Exercise ID."""
- self.assertEqual("logarithms_1", self.exercise_object.name)
-
- def test_get_exercise_related_videos(self):
- """Tests if the result is an empty list or if it is a list of Video objects."""
- if not self.exercise_object.related_videos:
- self.assertListEqual(self.exercise_object.related_videos, [])
- else:
- for obj in self.exercise_object.related_videos:
- self.assertIsInstance(obj, Video)
-
- def test_get_exercise_followup_exercises(self):
- """Tests if the result is an empty list or if it is a list of Exercise objects."""
- if not self.exercise_object.followup_exercises:
- self.assertListEqual(self.exercise_object.followup_exercises, [])
- else:
- for obj in self.exercise_object.followup_exercises:
- self.assertIsInstance(obj, Exercise)
-
-
-class ApiCallBadgeTest(unittest.TestCase):
- """Performs an API call to fetch badges and verifies the result.
-
- Attributes:
- badges_list_object: A list of Badge objects.
- badges_category_object: A BadgeCategory object that was specifically requested.
- badges_category_list_object: A list of BadgeCategory objects.
- """
-
- def setUp(self):
- """Prepares the objects that will be tested."""
- self.badges_list_object = Khan().get_badges()
- self.badges_category_object = Khan().get_badge_category(1)
- self.badges_category_list_object = Khan().get_badge_category()
-
- def test_get_badges(self):
- """Tests if the result is an empty list or if it is a list of Badge objects."""
- if not self.badges_list_object:
- self.assertListEqual(self.badges_list_object, [])
- else:
- for obj in self.badges_list_object:
- self.assertIsInstance(obj, Badge)
-
- def test_get_category(self):
- """Tests if the result object contains the requested Badge category."""
- self.assertEqual(self.badges_category_object.category, 1)
-
- def test_get_category_list(self):
- """Tests if the result is an empty list or if it is a list of BadgeCategory objects."""
- if not self.badges_category_list_object:
- self.assertListEqual(self.badges_category_list_object, [])
- else:
- for obj in self.badges_category_list_object:
- self.assertIsInstance(obj, BadgeCategory)
-
-
-
-class ApiCallUserTest(unittest.TestCase):
- """Performs an API call to fetch user data and verifies the result.
-
- This test will require login in Khan Academy.
-
- Attributes:
- user_object: A User object that is created after the user logs in.
- badges_object: A Badge object that contains UserBadge objects if the user is logged in.
- """
-
- def setUp(self):
- """Prepares the objects that will be tested."""
- self.user_object = Khan().get_user()
- self.badges_object = Khan().get_badges()
-
- def test_get_user(self):
- """Tests if the result is an instance of User. The object is created if the result of the API call is a success."""
- self.assertIsInstance(self.user_object, User)
-
- def test_get_user_videos(self):
- """Tests if the result is an empty list or if it is a list of UserVideo objects.
- For each UserVideo object check if log contains VideoLog objects.
- """
- if not self.user_object.videos:
- self.assertListEqual(self.user_object.videos, [])
- else:
- for obj in self.user_object.videos:
- self.assertIsInstance(obj, UserVideo)
- if not obj.log:
- self.assertListEqual(obj.log, [])
- else:
- for l_obj in obj.log:
- self.assertIsInstance(l_obj, VideoLog)
-
- def test_get_user_exercises(self):
- """Tests if the result is an empty list or if it is a list of UserExercise objects.
- For each UserExercise object, checks if log attribute only contains ProblemLog objects
- and if followup_exercises attribute only contains UserExercise objects.
- """
- if not self.user_object.exercises:
- self.assertListEqual(self.user_object.exercises, [])
- else:
- for obj in self.user_object.exercises:
- self.assertIsInstance(obj, UserExercise)
- if not obj.log:
- self.assertListEqual(obj.log, [])
- else:
- for l_obj in obj.log:
- self.assertIsInstance(l_obj, ProblemLog)
- if not obj.followup_exercises:
- self.assertListEqual(obj.followup_exercises, [])
- else:
- for f_obj in obj.followup_exercises:
- self.assertIsInstance(f_obj, UserExercise)
-
- def test_get_user_badges(self):
- """Tests if the result is an empty list or if it is a list of Badge objects.
- Then, for each Badge that contains the user_badges key, each entry must be a UserBadge instance.
- """
- if not self.badges_object:
- self.assertListEqual(self.badges_object, [])
- else:
- for obj in self.badges_object:
- if not obj.__contains__("user_badges"):
- continue
- else:
- for u_obj in obj.user_badges:
- self.assertIsInstance(u_obj, UserBadge)
-
-
-class ApiCallTopicTest(unittest.TestCase):
- """Performs an API call to fetch Topic data and verifies the result.
-
- Attributes:
- topic_tree_object: A Topic object that represents the entire Topic tree.
- topic_subtree_object: A Topic object that was specifically requested. It represents a subtree.
- """
-
- def setUp(self):
- """Prepares the objects that will be tested."""
- self.topic_tree_object = Khan().get_topic_tree()
- self.topic_subtree_object = Khan().get_topic("addition-subtraction")
- self.topic_exercises_list_object = Khan().get_topic_exercises("addition-subtraction")
- self.topic_videos_list_object = Khan().get_topic_videos("addition-subtraction")
-
- def test_get_tree(self):
- """Tests if the result is an instance of Topic."""
- self.assertIsInstance(self.topic_tree_object, Topic)
-
- def test_get_subtree(self):
- """Tests if the result object contains the requested topic slug."""
- self.assertEqual("addition-subtraction", self.topic_subtree_object.slug)
-
- def test_get_topic_exercises(self):
- """Tests if the result is an empty list or if it is a list of Exercise objects."""
- if not self.topic_exercises_list_object:
- self.assertListEqual(self.topic_exercises_list_object, [])
- else:
- for obj in self.topic_exercises_list_object:
- self.assertIsInstance(obj, Exercise)
-
- def test_get_topic_videos(self):
- """Tests if the result is an empty list or if it is a list of Video objects."""
- if not self.topic_videos_list_object:
- self.assertListEqual(self.topic_videos_list_object, [])
- else:
- for obj in self.topic_videos_list_object:
- self.assertIsInstance(obj, Video)
-
-
-
-class ApiCallVideoTest(unittest.TestCase):
- """Performs an API call to fetch video data and verifies the result.
-
- Attributes:
- video_object: A Video object that was specifically requested.
- """
-
- def setUp(self):
- """Prepares the objects that will be tested."""
- self.video_object = Khan().get_video("adding-subtracting-negative-numbers")
-
- def test_get_video(self):
- """Tests if the result object contains the requested video readable id."""
- self.assertEqual("adding-subtracting-negative-numbers", self.video_object.readable_id)
-
-
-
-def prepare_suites_from_test_cases(case_class_list):
- """
- This function prepares a list of suites to be tested.
- """
- test_suites = []
- for cls in case_class_list:
- test_suites.append(unittest.TestLoader().loadTestsFromTestCase(cls))
- return test_suites
-
-
-
-# "test_cases" contains the classes that will be tested.
-# Add or remove test cases as needed.
-test_cases = [
-
- ApiCallExerciseTest,
- ApiCallBadgeTest,
- ApiCallUserTest,
- ApiCallTopicTest,
- ApiCallVideoTest,
-
-]
-
-# Prepares a set of suites.
-all_tests = unittest.TestSuite(prepare_suites_from_test_cases(test_cases))
-
-# Runs all tests on suites passed as an argument.
-unittest.TextTestRunner(verbosity=2).run(all_tests)
-
diff --git a/python-packages/khan_api_python/oauth.py b/python-packages/khan_api_python/oauth.py
deleted file mode 100644
index a7694c10d9..0000000000
--- a/python-packages/khan_api_python/oauth.py
+++ /dev/null
@@ -1,666 +0,0 @@
-"""
-The MIT License
-
-Copyright (c) 2007 Leah Culver
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-"""
-
-import logging
-logger = logging.getLogger()
-
-import cgi
-import urllib
-import time
-import random
-import urlparse
-import hmac
-import binascii
-
-
-VERSION = '1.0' # Hi Blaine!
-HTTP_METHOD = 'GET'
-SIGNATURE_METHOD = 'PLAINTEXT'
-
-
-class OAuthError(RuntimeError):
- """Generic exception class."""
- def __init__(self, message='OAuth error occurred.'):
- self.message = message
-
-def build_authenticate_header(realm=''):
- """Optional WWW-Authenticate header (401 error)"""
- return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
-
-def escape(s):
- """Escape a URL including any /."""
- return urllib.quote(s, safe='~')
-
-def _utf8_str(s):
- """Convert unicode to utf-8."""
- if isinstance(s, unicode):
- return s.encode("utf-8")
- else:
- return str(s)
-
-def generate_timestamp():
- """Get seconds since epoch (UTC)."""
- return int(time.time())
-
-def generate_nonce(length=8):
- """Generate pseudorandom number."""
- return ''.join([str(random.randint(0, 9)) for i in range(length)])
-
-def generate_verifier(length=8):
- """Generate pseudorandom number."""
- return ''.join([str(random.randint(0, 9)) for i in range(length)])
-
-
-class OAuthConsumer(object):
- """Consumer of OAuth authentication.
-
- OAuthConsumer is a data type that represents the identity of the Consumer
- via its shared secret with the Service Provider.
-
- """
- key = None
- secret = None
-
- def __init__(self, key, secret):
- self.key = key
- self.secret = secret
-
-
-class OAuthToken(object):
- """OAuthToken is a data type that represents an End User via either an access
- or request token.
-
- key -- the token
- secret -- the token secret
-
- """
- key = None
- secret = None
- callback = None
- callback_confirmed = None
- verifier = None
-
- def __init__(self, key, secret):
- self.key = key
- self.secret = secret
-
- def set_callback(self, callback):
- self.callback = callback
- self.callback_confirmed = 'true'
-
- def set_verifier(self, verifier=None):
- if verifier is not None:
- self.verifier = verifier
- else:
- self.verifier = generate_verifier()
-
- def get_callback_url(self):
- if self.callback and self.verifier:
- # Append the oauth_verifier.
- parts = urlparse.urlparse(self.callback)
- scheme, netloc, path, params, query, fragment = parts[:6]
- if query:
- query = '%s&oauth_verifier=%s' % (query, self.verifier)
- else:
- query = 'oauth_verifier=%s' % self.verifier
- return urlparse.urlunparse((scheme, netloc, path, params,
- query, fragment))
- return self.callback
-
- def to_string(self):
- data = {
- 'oauth_token': self.key,
- 'oauth_token_secret': self.secret,
- }
- if self.callback_confirmed is not None:
- data['oauth_callback_confirmed'] = self.callback_confirmed
- return urllib.urlencode(data)
-
- def from_string(s):
- """ Returns a token from something like:
- oauth_token_secret=xxx&oauth_token=xxx
- """
- params = cgi.parse_qs(s, keep_blank_values=False)
- key = params['oauth_token'][0]
- secret = params['oauth_token_secret'][0]
- token = OAuthToken(key, secret)
- try:
- token.callback_confirmed = params['oauth_callback_confirmed'][0]
- except KeyError:
- pass # 1.0, no callback confirmed.
- return token
- from_string = staticmethod(from_string)
-
- def __str__(self):
- return self.to_string()
-
-
-class OAuthRequest(object):
- """OAuthRequest represents the request and can be serialized.
-
- OAuth parameters:
- - oauth_consumer_key
- - oauth_token
- - oauth_signature_method
- - oauth_signature
- - oauth_timestamp
- - oauth_nonce
- - oauth_version
- - oauth_verifier
- ... any additional parameters, as defined by the Service Provider.
- """
- parameters = None # OAuth parameters.
- http_method = HTTP_METHOD
- http_url = None
- version = VERSION
-
- def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
- self.http_method = http_method
- self.http_url = http_url
- self.parameters = parameters or {}
-
- def set_parameter(self, parameter, value):
- self.parameters[parameter] = value
-
- def get_parameter(self, parameter):
- try:
- return self.parameters[parameter]
- except:
- raise OAuthError('Parameter not found: %s' % parameter)
-
- def _get_timestamp_nonce(self):
- return self.get_parameter('oauth_timestamp'), self.get_parameter(
- 'oauth_nonce')
-
- def get_nonoauth_parameters(self):
- """Get any non-OAuth parameters."""
- parameters = {}
- for k, v in self.parameters.iteritems():
- # Ignore oauth parameters.
- if k.find('oauth_') < 0:
- parameters[k] = v
- return parameters
-
- def to_header(self, realm=''):
- """Serialize as a header for an HTTPAuth request."""
- auth_header = 'OAuth realm="%s"' % realm
- # Add the oauth parameters.
- if self.parameters:
- for k, v in self.parameters.iteritems():
- if k[:6] == 'oauth_':
- auth_header += ', %s="%s"' % (k, escape(str(v)))
- return {'Authorization': auth_header}
-
- def to_postdata(self):
- """Serialize as post data for a POST request."""
- return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \
- for k, v in self.parameters.iteritems()])
-
- def to_url(self):
- """Serialize as a URL for a GET request."""
- return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
-
- def get_normalized_parameters(self):
- """Return a string that contains the parameters that must be signed."""
- params = self.parameters
- try:
- # Exclude the signature if it exists.
- del params['oauth_signature']
- except:
- pass
- # Escape key values before sorting.
- key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \
- for k,v in params.items()]
- # Sort lexicographically, first after key, then after value.
- key_values.sort()
- # Combine key value pairs into a string.
- return '&'.join(['%s=%s' % (k, v) for k, v in key_values])
-
- def get_normalized_http_method(self):
- """Uppercases the http method."""
- return self.http_method.upper()
-
- def get_normalized_http_url(self):
- """Parses the URL and rebuilds it to be scheme://host/path."""
- parts = urlparse.urlparse(self.http_url)
- scheme, netloc, path = parts[:3]
- # Exclude default port numbers.
- if scheme == 'http' and netloc[-3:] == ':80':
- netloc = netloc[:-3]
- elif scheme == 'https' and netloc[-4:] == ':443':
- netloc = netloc[:-4]
- return '%s://%s%s' % (scheme, netloc, path)
-
- def sign_request(self, signature_method, consumer, token):
- """Set the signature parameter to the result of build_signature."""
- # Set the signature method.
- self.set_parameter('oauth_signature_method',
- signature_method.get_name())
- # Set the signature.
- self.set_parameter('oauth_signature',
- self.build_signature(signature_method, consumer, token))
-
- def build_signature(self, signature_method, consumer, token):
- """Calls the build signature method within the signature method."""
- return signature_method.build_signature(self, consumer, token)
-
- def from_request(http_method, http_url, headers=None, parameters=None,
- query_string=None):
- """Combines multiple parameter sources."""
- if parameters is None:
- parameters = {}
-
- # Headers
- if headers and 'Authorization' in headers:
- auth_header = headers['Authorization']
- # Check that the authorization header is OAuth.
- if auth_header[:6] == 'OAuth ':
- auth_header = auth_header[6:]
- try:
- # Get the parameters from the header.
- header_params = OAuthRequest._split_header(auth_header)
- parameters.update(header_params)
- except:
- raise OAuthError('Unable to parse OAuth parameters from '
- 'Authorization header.')
-
- # GET or POST query string.
- if query_string:
- query_params = OAuthRequest._split_url_string(query_string)
- parameters.update(query_params)
-
- # URL parameters.
- param_str = urlparse.urlparse(http_url)[4] # query
- url_params = OAuthRequest._split_url_string(param_str)
- parameters.update(url_params)
-
- if parameters:
- return OAuthRequest(http_method, http_url, parameters)
-
- return None
- from_request = staticmethod(from_request)
-
- def from_consumer_and_token(oauth_consumer, token=None,
- callback=None, verifier=None, http_method=HTTP_METHOD,
- http_url=None, parameters=None):
- if not parameters:
- parameters = {}
-
- defaults = {
- 'oauth_consumer_key': oauth_consumer.key,
- 'oauth_timestamp': generate_timestamp(),
- 'oauth_nonce': generate_nonce(),
- 'oauth_version': OAuthRequest.version,
- }
-
- defaults.update(parameters)
- parameters = defaults
-
- if token:
- parameters['oauth_token'] = token.key
- if token.callback:
- parameters['oauth_callback'] = token.callback
- # 1.0a support for verifier.
- if verifier:
- parameters['oauth_verifier'] = verifier
- elif callback:
- # 1.0a support for callback in the request token request.
- parameters['oauth_callback'] = callback
-
- return OAuthRequest(http_method, http_url, parameters)
- from_consumer_and_token = staticmethod(from_consumer_and_token)
-
- def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
- http_url=None, parameters=None):
- if not parameters:
- parameters = {}
-
- parameters['oauth_token'] = token.key
-
- if callback:
- parameters['oauth_callback'] = callback
-
- return OAuthRequest(http_method, http_url, parameters)
- from_token_and_callback = staticmethod(from_token_and_callback)
-
- def _split_header(header):
- """Turn Authorization: header into parameters."""
- params = {}
- parts = header.split(',')
- for param in parts:
- # Ignore realm parameter.
- if param.find('realm') > -1:
- continue
- # Remove whitespace.
- param = param.strip()
- # Split key-value.
- param_parts = param.split('=', 1)
- # Remove quotes and unescape the value.
- params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
- return params
- _split_header = staticmethod(_split_header)
-
- def _split_url_string(param_str):
- """Turn URL string into parameters."""
- parameters = cgi.parse_qs(param_str, keep_blank_values=False)
- for k, v in parameters.iteritems():
- parameters[k] = urllib.unquote(v[0])
- return parameters
- _split_url_string = staticmethod(_split_url_string)
-
-class OAuthServer(object):
- """A worker to check the validity of a request against a data store."""
- timestamp_threshold = 300 # In seconds, five minutes.
- version = VERSION
- signature_methods = None
- data_store = None
-
- def __init__(self, data_store=None, signature_methods=None):
- self.data_store = data_store
- self.signature_methods = signature_methods or {}
-
- def set_data_store(self, data_store):
- self.data_store = data_store
-
- def get_data_store(self):
- return self.data_store
-
- def add_signature_method(self, signature_method):
- self.signature_methods[signature_method.get_name()] = signature_method
- return self.signature_methods
-
- def fetch_request_token(self, oauth_request):
- """Processes a request_token request and returns the
- request token on success.
- """
- try:
- # Get the request token for authorization.
- token = self._get_token(oauth_request, 'request')
- except OAuthError:
- # No token required for the initial token request.
- version = self._get_version(oauth_request)
- consumer = self._get_consumer(oauth_request)
- try:
- callback = self.get_callback(oauth_request)
- except OAuthError:
- callback = None # 1.0, no callback specified.
- self._check_signature(oauth_request, consumer, None)
- # Fetch a new token.
- token = self.data_store.fetch_request_token(consumer, callback)
- return token
-
- def fetch_access_token(self, oauth_request):
- logger.debug("!!! IN OAuthServer.fetch_access_token OAuth Params: %s"%oauth_request.parameters)
-
- """Processes an access_token request and returns the
- access token on success.
- """
- version = self._get_version(oauth_request)
- consumer = self._get_consumer(oauth_request)
- try:
- verifier = self._get_verifier(oauth_request)
- except OAuthError:
- verifier = None
- # Get the request token.
- token = self._get_token(oauth_request, 'request')
- self._check_signature(oauth_request, consumer, token)
- new_token = self.data_store.fetch_access_token(consumer, token, verifier)
- return new_token
-
- def verify_request(self, oauth_request):
- """Verifies an api call and checks all the parameters."""
- # -> consumer and token
- version = self._get_version(oauth_request)
- consumer = self._get_consumer(oauth_request)
- # Get the access token.
- token = self._get_token(oauth_request, 'access')
- self._check_signature(oauth_request, consumer, token)
- parameters = oauth_request.get_nonoauth_parameters()
- return consumer, token, parameters
-
- def authorize_token(self, token, user):
- """Authorize a request token."""
- return self.data_store.authorize_request_token(token, user)
-
- def get_callback(self, oauth_request):
- """Get the callback URL."""
- return oauth_request.get_parameter('oauth_callback')
-
- def build_authenticate_header(self, realm=''):
- """Optional support for the authenticate header."""
- return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
-
- def _get_version(self, oauth_request):
- """Verify the correct version request for this server."""
- try:
- version = oauth_request.get_parameter('oauth_version')
- except:
- version = VERSION
- if version and version != self.version:
- raise OAuthError('OAuth version %s not supported.' % str(version))
- return version
-
- def _get_signature_method(self, oauth_request):
- """Figure out the signature with some defaults."""
- try:
- signature_method = oauth_request.get_parameter(
- 'oauth_signature_method')
- except:
- signature_method = SIGNATURE_METHOD
- try:
- # Get the signature method object.
- signature_method = self.signature_methods[signature_method]
- except:
- signature_method_names = ', '.join(self.signature_methods.keys())
-            raise OAuthError('Signature method %s not supported, try one of the '
- 'following: %s' % (signature_method, signature_method_names))
-
- return signature_method
-
- def _get_consumer(self, oauth_request):
- consumer_key = oauth_request.get_parameter('oauth_consumer_key')
- consumer = self.data_store.lookup_consumer(consumer_key)
- if not consumer:
- raise OAuthError('Invalid consumer.')
- return consumer
-
- def _get_token(self, oauth_request, token_type='access'):
- """Try to find the token for the provided request token key."""
- token_field = oauth_request.get_parameter('oauth_token')
- token = self.data_store.lookup_token(token_type, token_field)
- if not token:
- raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
- return token
-
- def _get_verifier(self, oauth_request):
- return oauth_request.get_parameter('oauth_verifier')
-
- def _check_signature(self, oauth_request, consumer, token):
- timestamp, nonce = oauth_request._get_timestamp_nonce()
- self._check_timestamp(timestamp)
- self._check_nonce(consumer, token, nonce)
- signature_method = self._get_signature_method(oauth_request)
- try:
- signature = oauth_request.get_parameter('oauth_signature')
- except:
- raise OAuthError('Missing signature.')
- # Validate the signature.
- valid_sig = signature_method.check_signature(oauth_request, consumer,
- token, signature)
- if not valid_sig:
- key, base = signature_method.build_signature_base_string(
- oauth_request, consumer, token)
- logging.error("key: %s",key)
- logging.error("base: %s",base)
- raise OAuthError('Invalid signature. Expected signature base '
- 'string: %s' % base)
- built = signature_method.build_signature(oauth_request, consumer, token)
-
- def _check_timestamp(self, timestamp):
- """Verify that timestamp is recentish."""
- timestamp = int(timestamp)
- now = int(time.time())
- lapsed = abs(now - timestamp)
- if lapsed > self.timestamp_threshold:
-            raise OAuthError('Expired timestamp: given %d and now %d has a '
-                             'greater difference than threshold %d' %
-                             (timestamp, now, self.timestamp_threshold))
-
- def _check_nonce(self, consumer, token, nonce):
- """Verify that the nonce is uniqueish."""
- nonce = self.data_store.lookup_nonce(consumer, token, nonce)
- if nonce:
- raise OAuthError('Nonce already used: %s' % str(nonce))
-
-
-class OAuthClient(object):
- """OAuthClient is a worker to attempt to execute a request."""
- consumer = None
- token = None
-
- def __init__(self, oauth_consumer, oauth_token):
- self.consumer = oauth_consumer
- self.token = oauth_token
-
- def get_consumer(self):
- return self.consumer
-
- def get_token(self):
- return self.token
-
- def fetch_request_token(self, oauth_request):
- """-> OAuthToken."""
- raise NotImplementedError
-
- def fetch_access_token(self, oauth_request):
- """-> OAuthToken."""
- raise NotImplementedError
-
- def access_resource(self, oauth_request):
- """-> Some protected resource."""
- raise NotImplementedError
-
-
-class OAuthDataStore(object):
- """A database abstraction used to lookup consumers and tokens."""
-
- def lookup_consumer(self, key):
- """-> OAuthConsumer."""
- raise NotImplementedError
-
- def lookup_token(self, oauth_consumer, token_type, token_token):
- """-> OAuthToken."""
- raise NotImplementedError
-
- def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
- """-> OAuthToken."""
- raise NotImplementedError
-
- def fetch_request_token(self, oauth_consumer, oauth_callback):
- """-> OAuthToken."""
- raise NotImplementedError
-
- def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
- """-> OAuthToken."""
- raise NotImplementedError
-
- def authorize_request_token(self, oauth_token, user):
- """-> OAuthToken."""
- raise NotImplementedError
-
-
-class OAuthSignatureMethod(object):
- """A strategy class that implements a signature method."""
- def get_name(self):
- """-> str."""
- raise NotImplementedError
-
- def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
- """-> str key, str raw."""
- raise NotImplementedError
-
- def build_signature(self, oauth_request, oauth_consumer, oauth_token):
- """-> str."""
- raise NotImplementedError
-
- def check_signature(self, oauth_request, consumer, token, signature):
- built = self.build_signature(oauth_request, consumer, token)
- logging.info("Built signature: %s"%(built))
- return built == signature
-
-
-class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
-
- def get_name(self):
- return 'HMAC-SHA1'
-
- def build_signature_base_string(self, oauth_request, consumer, token):
- sig = (
- escape(oauth_request.get_normalized_http_method()),
- escape(oauth_request.get_normalized_http_url()),
- escape(oauth_request.get_normalized_parameters()),
- )
-
- key = '%s&' % escape(consumer.secret)
- if token:
- key += escape(token.secret)
- raw = '&'.join(sig)
- return key, raw
-
- def build_signature(self, oauth_request, consumer, token):
- """Builds the base signature string."""
- key, raw = self.build_signature_base_string(oauth_request, consumer,
- token)
-
- if isinstance(key, unicode):
- key = str(key)
-
- # HMAC object.
- try:
- import hashlib # 2.5
- hashed = hmac.new(key, raw, hashlib.sha1)
- except:
- import sha # Deprecated
- hashed = hmac.new(key, raw, sha)
-
- # Calculate the digest base 64.
- return binascii.b2a_base64(hashed.digest())[:-1]
-
-
-class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
-
- def get_name(self):
- return 'PLAINTEXT'
-
- def build_signature_base_string(self, oauth_request, consumer, token):
- """Concatenates the consumer key and secret."""
- sig = '%s&' % escape(consumer.secret)
- if token:
- sig = sig + escape(token.secret)
- return sig, sig
-
- def build_signature(self, oauth_request, consumer, token):
- key, raw = self.build_signature_base_string(oauth_request, consumer,
- token)
- return key
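
For reference, the signing flow this module implements: build an OAuthRequest from a consumer (and optionally a token), sign it with one of the OAuthSignatureMethod strategies, then serialize it as a URL or an Authorization header. A minimal sketch under Python 2 (the module itself is Python 2 only), using only classes defined above; the key, secret, and URL are placeholders:

    consumer = OAuthConsumer('my-key', 'my-secret')
    request = OAuthRequest.from_consumer_and_token(
        consumer,
        http_method='GET',
        http_url='http://example.com/api/resource',
        parameters={'foo': 'bar'},
    )
    request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), consumer, None)
    print(request.to_url())     # signed GET URL
    print(request.to_header())  # or as an Authorization header dict
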
diff --git a/python-packages/khan_api_python/secrets.py.template b/python-packages/khan_api_python/secrets.py.template
deleted file mode 100644
index 6bb6d45800..0000000000
--- a/python-packages/khan_api_python/secrets.py.template
+++ /dev/null
@@ -1,2 +0,0 @@
-CONSUMER_KEY = ""
-CONSUMER_SECRET = ""
diff --git a/python-packages/khan_api_python/test_oauth_client.py b/python-packages/khan_api_python/test_oauth_client.py
deleted file mode 100644
index c883f00667..0000000000
--- a/python-packages/khan_api_python/test_oauth_client.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# This file is a copy of https://github.com/Khan/khan-api/blob/master/examples/test_client/test_oauth_client.py
-
-import cgi
-import logging
-import urllib2
-import urlparse
-import webbrowser
-
-from oauth import OAuthConsumer, OAuthToken, OAuthRequest, OAuthSignatureMethod_HMAC_SHA1
-
-class TestOAuthClient(object):
-
- def __init__(self, server_url, consumer_key, consumer_secret):
- self.server_url = server_url
- self.consumer = OAuthConsumer(consumer_key, consumer_secret)
-
- def start_fetch_request_token(self, callback=None):
- oauth_request = OAuthRequest.from_consumer_and_token(
- self.consumer,
- callback=callback,
- http_url="%s/api/auth/request_token" % self.server_url
- )
-
- oauth_request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), self.consumer, None)
- webbrowser.open(oauth_request.to_url())
-
- def fetch_access_token(self, request_token):
-
- oauth_request = OAuthRequest.from_consumer_and_token(
- self.consumer,
- token=request_token,
- verifier=request_token.verifier,
- http_url="%s/api/auth/access_token" % self.server_url
- )
-
- oauth_request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), self.consumer, request_token)
-
- response = get_response(oauth_request.to_url())
-
- return OAuthToken.from_string(response)
-
- def access_resource(self, relative_url, access_token, method="GET"):
-
- full_url = self.server_url + relative_url
- url = urlparse.urlparse(full_url)
- query_params = cgi.parse_qs(url.query)
- for key in query_params:
- query_params[key] = query_params[key][0]
-
- oauth_request = OAuthRequest.from_consumer_and_token(
- self.consumer,
- token = access_token,
- http_url = full_url,
- parameters = query_params,
- http_method=method
- )
-
- oauth_request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), self.consumer, access_token)
-
- if method == "GET":
- response = get_response(oauth_request.to_url())
- else:
- response = post_response(full_url, oauth_request.to_postdata())
-
- return response.strip()
-
-def get_response(url):
- response = ""
- file = None
- try:
- file = urllib2.urlopen(url)
- response = file.read()
- finally:
- if file:
- file.close()
-
- return response
-
-def post_response(url, data):
- response = ""
- file = None
- try:
- file = urllib2.urlopen(url, data)
- response = file.read()
- finally:
- if file:
- file.close()
-
- return response
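
The client above drives the full three-legged flow: open the authorization URL in a browser, rebuild the authorized request token from the redirect, exchange it for an access token, and fetch a resource. A sketch of that sequence; the token values come from the browser redirect, and the endpoint path is illustrative:

    client = TestOAuthClient('http://www.khanacademy.org',
                             CONSUMER_KEY, CONSUMER_SECRET)
    client.start_fetch_request_token()  # opens the browser for authorization

    # Values copied from the oauth_callback redirect after the user approves.
    request_token = OAuthToken('token-from-redirect', 'secret-from-redirect')
    request_token.set_verifier('verifier-from-redirect')

    access_token = client.fetch_access_token(request_token)
    print(client.access_resource('/api/v1/user', access_token))
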
diff --git a/python-packages/khanacademy/__init__.py b/python-packages/khanacademy/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/khanacademy/test_oauth_client.py b/python-packages/khanacademy/test_oauth_client.py
deleted file mode 100644
index 96b185f4b8..0000000000
--- a/python-packages/khanacademy/test_oauth_client.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import cgi
-import logging
-import urllib2
-import urlparse
-import webbrowser
-
-from oauth import OAuthConsumer, OAuthToken, OAuthRequest, OAuthSignatureMethod_HMAC_SHA1
-
-class TestOAuthClient(object):
-
- def __init__(self, server_url, consumer_key, consumer_secret):
- self.server_url = server_url
- self.consumer = OAuthConsumer(consumer_key, consumer_secret)
-
- def start_fetch_request_token(self, callback=None):
- oauth_request = OAuthRequest.from_consumer_and_token(
- self.consumer,
- callback=callback,
- http_url="%s/api/auth/request_token" % self.server_url
- )
-
- oauth_request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), self.consumer, None)
- return oauth_request.to_url()
-
- def fetch_access_token(self, request_token):
- oauth_request = OAuthRequest.from_consumer_and_token(
- self.consumer,
- token=request_token,
- verifier=request_token.verifier,
- http_url="%s/api/auth/access_token" % self.server_url
- )
-
- oauth_request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), self.consumer, request_token)
-
- response = get_response(oauth_request.to_url())
-
- return OAuthToken.from_string(response)
-
- def access_resource(self, relative_url, access_token, method="GET"):
-
- full_url = self.server_url + relative_url
- url = urlparse.urlparse(full_url)
- query_params = cgi.parse_qs(url.query)
- for key in query_params:
- query_params[key] = query_params[key][0]
-
- oauth_request = OAuthRequest.from_consumer_and_token(
- self.consumer,
- token = access_token,
- http_url = full_url,
- parameters = query_params,
- http_method=method
- )
-
- oauth_request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), self.consumer, access_token)
-
- if method == "GET":
- response = get_response(oauth_request.to_url())
- else:
- response = post_response(full_url, oauth_request.to_postdata())
-
- return response.strip()
-
-def get_response(url):
- response = ""
- file = None
- try:
- file = urllib2.urlopen(url, timeout=300)
- response = file.read()
- finally:
- if file:
- file.close()
-
- return response
-
-def post_response(url, data):
- response = ""
- file = None
- try:
- file = urllib2.urlopen(url, data, timeout=300)
- response = file.read()
- finally:
- if file:
- file.close()
-
- return response
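
This copy differs from the khan_api_python one in two details: start_fetch_request_token() returns the authorization URL instead of opening a browser (so a web app can redirect the user itself), and both urlopen() calls carry a 300-second timeout. A short sketch, with an assumed local callback handler:

    client = TestOAuthClient('http://www.khanacademy.org',
                             CONSUMER_KEY, CONSUMER_SECRET)
    auth_url = client.start_fetch_request_token(
        callback='http://localhost:8008/oauth_callback')
    # Redirect the user to auth_url rather than calling webbrowser.open().
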
diff --git a/python-packages/memory_profiler.py b/python-packages/memory_profiler.py
deleted file mode 100755
index 1e77a68385..0000000000
--- a/python-packages/memory_profiler.py
+++ /dev/null
@@ -1,617 +0,0 @@
-"""Profile the memory usage of a Python program"""
-
-__version__ = '0.26'
-
-_CMD_USAGE = "python -m memory_profiler script_file.py"
-
-import time, sys, os, pdb
-import warnings
-import linecache
-import inspect
-import subprocess
-from copy import copy
-
-# TODO: provide alternative when multiprocessing is not available
-try:
- from multiprocessing import Process, Pipe
-except ImportError:
- from multiprocessing.dummy import Process, Pipe
-
-
-try:
- import psutil
-
- def _get_memory(pid):
- process = psutil.Process(pid)
- try:
- mem = float(process.get_memory_info()[0]) / (1024 ** 2)
- except psutil.AccessDenied:
- mem = -1
- return mem
-
-
-except ImportError:
-
- warnings.warn("psutil module not found. memory_profiler will be slow")
-
- if os.name == 'posix':
- def _get_memory(pid):
- # ..
- # .. memory usage in MB ..
- # .. this should work on both Mac and Linux ..
- # .. subprocess.check_output appeared in 2.7, using Popen ..
- # .. for backwards compatibility ..
- out = subprocess.Popen(['ps', 'v', '-p', str(pid)],
- stdout=subprocess.PIPE).communicate()[0].split(b'\n')
- try:
- vsz_index = out[0].split().index(b'RSS')
- return float(out[1].split()[vsz_index]) / 1024
- except:
- return -1
- else:
- raise NotImplementedError('The psutil module is required for non-unix '
- 'platforms')
-
-
-class Timer(Process):
- """
- Fetch memory consumption from over a time interval
- """
-
- def __init__(self, monitor_pid, interval, pipe, *args, **kw):
- self.monitor_pid = monitor_pid
- self.interval = interval
- self.pipe = pipe
- self.cont = True
- super(Timer, self).__init__(*args, **kw)
-
- def run(self):
- m = _get_memory(self.monitor_pid)
- timings = [m]
- self.pipe.send(0) # we're ready
- while not self.pipe.poll(self.interval):
- m = _get_memory(self.monitor_pid)
- timings.append(m)
- self.pipe.send(timings)
-
-
-def memory_usage(proc=-1, interval=.1, timeout=None):
- """
- Return the memory usage of a process or piece of code
-
- Parameters
- ----------
- proc : {int, string, tuple, subprocess.Popen}, optional
- The process to monitor. Can be given by an integer/string
- representing a PID, by a Popen object or by a tuple
- representing a Python function. The tuple contains three
- values (f, args, kw) and specifies to run the function
- f(*args, **kw).
- Set to -1 (default) for current process.
-
- interval : float, optional
- Interval at which measurements are collected.
-
- timeout : float, optional
- Maximum amount of time (in seconds) to wait before returning.
-
- Returns
- -------
-    mem_usage : list of floating-point values
-        memory usage, in MB. Its length is always < timeout / interval
- """
- ret = []
-
- if timeout is not None:
- max_iter = int(timeout / interval)
- elif isinstance(proc, int):
- # external process and no timeout
- max_iter = 1
- else:
- # for a Python function wait until it finishes
- max_iter = float('inf')
-
- if hasattr(proc, '__call__'):
- proc = (proc, (), {})
- if isinstance(proc, (list, tuple)):
- if len(proc) == 1:
- f, args, kw = (proc[0], (), {})
- elif len(proc) == 2:
- f, args, kw = (proc[0], proc[1], {})
- elif len(proc) == 3:
- f, args, kw = (proc[0], proc[1], proc[2])
- else:
- raise ValueError
-
- aspec = inspect.getargspec(f)
- n_args = len(aspec.args)
- if aspec.defaults is not None:
- n_args -= len(aspec.defaults)
- if n_args != len(args):
- raise ValueError(
-                'Function expects %s value(s) but %s were given'
- % (n_args, len(args)))
-
- child_conn, parent_conn = Pipe() # this will store Timer's results
- p = Timer(os.getpid(), interval, child_conn)
- p.start()
- parent_conn.recv() # wait until we start getting memory
- f(*args, **kw)
- parent_conn.send(0) # finish timing
- ret = parent_conn.recv()
- p.join(5 * interval)
- elif isinstance(proc, subprocess.Popen):
- # external process, launched from Python
- while True:
- ret.append(_get_memory(proc.pid))
- time.sleep(interval)
- if timeout is not None:
- max_iter -= 1
- if max_iter == 0:
- break
- if proc.poll() is not None:
- break
- else:
- # external process
- if proc == -1:
- proc = os.getpid()
- if max_iter == -1:
- max_iter = 1
- counter = 0
- while counter < max_iter:
- counter += 1
- ret.append(_get_memory(proc))
- time.sleep(interval)
- return ret
-
-# ..
-# .. utility functions for line-by-line ..
-
-def _find_script(script_name):
- """ Find the script.
-
- If the input is not a file, then $PATH will be searched.
- """
- if os.path.isfile(script_name):
- return script_name
- path = os.getenv('PATH', os.defpath).split(os.pathsep)
- for folder in path:
- if folder == '':
- continue
- fn = os.path.join(folder, script_name)
- if os.path.isfile(fn):
- return fn
-
- sys.stderr.write('Could not find script {0}\n'.format(script_name))
- raise SystemExit(1)
-
-
-class LineProfiler:
- """ A profiler that records the amount of memory for each line """
-
- def __init__(self, **kw):
- self.functions = list()
- self.code_map = {}
- self.enable_count = 0
- self.max_mem = kw.get('max_mem', None)
-
- def __call__(self, func):
- self.add_function(func)
- f = self.wrap_function(func)
- f.__module__ = func.__module__
- f.__name__ = func.__name__
- f.__doc__ = func.__doc__
- f.__dict__.update(getattr(func, '__dict__', {}))
- return f
-
- def add_function(self, func):
- """ Record line profiling information for the given Python function.
- """
- try:
- # func_code does not exist in Python3
- code = func.__code__
- except AttributeError:
- import warnings
- warnings.warn("Could not extract a code object for the object %r"
- % (func,))
- return
- if code not in self.code_map:
- self.code_map[code] = {}
- self.functions.append(func)
-
- def wrap_function(self, func):
- """ Wrap a function to profile it.
- """
-
- def f(*args, **kwds):
- self.enable_by_count()
- try:
- result = func(*args, **kwds)
- finally:
- self.disable_by_count()
- return result
- return f
-
- def run(self, cmd):
- """ Profile a single executable statment in the main namespace.
- """
- import __main__
- main_dict = __main__.__dict__
- return self.runctx(cmd, main_dict, main_dict)
-
- def runctx(self, cmd, globals, locals):
- """ Profile a single executable statement in the given namespaces.
- """
- self.enable_by_count()
- try:
- exec(cmd, globals, locals)
- finally:
- self.disable_by_count()
- return self
-
- def runcall(self, func, *args, **kw):
- """ Profile a single function call.
- """
- # XXX where is this used ? can be removed ?
- self.enable_by_count()
- try:
- return func(*args, **kw)
- finally:
- self.disable_by_count()
-
- def enable_by_count(self):
- """ Enable the profiler if it hasn't been enabled before.
- """
- if self.enable_count == 0:
- self.enable()
- self.enable_count += 1
-
- def disable_by_count(self):
- """ Disable the profiler if the number of disable requests matches the
- number of enable requests.
- """
- if self.enable_count > 0:
- self.enable_count -= 1
- if self.enable_count == 0:
- self.disable()
-
- def trace_memory_usage(self, frame, event, arg):
- """Callback for sys.settrace"""
- if event in ('line', 'return') and frame.f_code in self.code_map:
- lineno = frame.f_lineno
- if event == 'return':
- lineno += 1
- entry = self.code_map[frame.f_code].setdefault(lineno, [])
- entry.append(_get_memory(os.getpid()))
-
- return self.trace_memory_usage
-
- def trace_max_mem(self, frame, event, arg):
-        # drop into pdb as soon as memory usage exceeds self.max_mem
- if event in ('line', 'return') and frame.f_code in self.code_map:
- c = _get_memory(os.getpid())
- if c >= self.max_mem:
- t = 'Current memory {0:.2f} MB exceeded the maximum '.format(c) + \
- 'of {0:.2f} MB\n'.format(self.max_mem)
- sys.stdout.write(t)
- sys.stdout.write('Stepping into the debugger \n')
- frame.f_lineno -= 2
- p = pdb.Pdb()
- p.quitting = False
- p.stopframe = frame
- p.returnframe = None
- p.stoplineno = frame.f_lineno - 3
- p.botframe = None
- return p.trace_dispatch
-
- return self.trace_max_mem
-
- def __enter__(self):
- self.enable_by_count()
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.disable_by_count()
-
- def enable(self):
- if self.max_mem is not None:
- sys.settrace(self.trace_max_mem)
- else:
- sys.settrace(self.trace_memory_usage)
-
- def disable(self):
- self.last_time = {}
- sys.settrace(None)
-
-
-def show_results(prof, stream=None, precision=3):
- if stream is None:
- stream = sys.stdout
- template = '{0:>6} {1:>12} {2:>12} {3:<}'
-
- for code in prof.code_map:
- lines = prof.code_map[code]
- if not lines:
- # .. measurements are empty ..
- continue
- filename = code.co_filename
- if filename.endswith((".pyc", ".pyo")):
- filename = filename[:-1]
- stream.write('Filename: ' + filename + '\n\n')
- if not os.path.exists(filename):
- stream.write('ERROR: Could not find file ' + filename + '\n')
- if filename.startswith("ipython-input") or filename.startswith("
-
- The given statement (which doesn't require quote marks) is run via the
- LineProfiler. Profiling is enabled for the functions specified by the -f
- options. The statistics will be shown side-by-side with the code through
- the pager once the statement has completed.
-
- Options:
-
-    -f <function>: LineProfiler only profiles functions and methods it is told
- to profile. This option tells the profiler about these functions. Multiple
- -f options may be used. The argument may be any expression that gives
- a Python function or method object. However, one must be careful to avoid
- spaces that may confuse the option parser. Additionally, functions defined
- in the interpreter at the In[] prompt or via %run currently cannot be
- displayed. Write these functions out to a separate file and import them.
-
- One or more -f options are required to get any useful results.
-
-    -T <filename>: dump the text-formatted statistics with the code
- side-by-side out to a text file.
-
- -r: return the LineProfiler object after it has completed profiling.
- """
- try:
- from StringIO import StringIO
- except ImportError: # Python 3.x
- from io import StringIO
-
- # Local imports to avoid hard dependency.
- from distutils.version import LooseVersion
- import IPython
- ipython_version = LooseVersion(IPython.__version__)
- if ipython_version < '0.11':
- from IPython.genutils import page
- from IPython.ipstruct import Struct
- from IPython.ipapi import UsageError
- else:
- from IPython.core.page import page
- from IPython.utils.ipstruct import Struct
- from IPython.core.error import UsageError
-
- # Escape quote markers.
- opts_def = Struct(T=[''], f=[])
- parameter_s = parameter_s.replace('"', r'\"').replace("'", r"\'")
- opts, arg_str = self.parse_options(parameter_s, 'rf:T:', list_all=True)
- opts.merge(opts_def)
- global_ns = self.shell.user_global_ns
- local_ns = self.shell.user_ns
-
- # Get the requested functions.
- funcs = []
- for name in opts.f:
- try:
- funcs.append(eval(name, global_ns, local_ns))
- except Exception as e:
- raise UsageError('Could not find function %r.\n%s: %s' % (name,
- e.__class__.__name__, e))
-
- profile = LineProfiler()
- for func in funcs:
- profile(func)
-
- # Add the profiler to the builtins for @profile.
- try:
- import builtins
-    except ImportError:  # Python 2.x
- import __builtin__ as builtins
-
- if 'profile' in builtins.__dict__:
- had_profile = True
- old_profile = builtins.__dict__['profile']
- else:
- had_profile = False
- old_profile = None
- builtins.__dict__['profile'] = profile
-
- try:
- try:
- profile.runctx(arg_str, global_ns, local_ns)
- message = ''
- except SystemExit:
- message = "*** SystemExit exception caught in code being profiled."
- except KeyboardInterrupt:
- message = ("*** KeyboardInterrupt exception caught in code being "
- "profiled.")
- finally:
- if had_profile:
- builtins.__dict__['profile'] = old_profile
-
- # Trap text output.
- stdout_trap = StringIO()
- show_results(profile, stdout_trap)
- output = stdout_trap.getvalue()
- output = output.rstrip()
-
- if ipython_version < '0.11':
- page(output, screen_lines=self.shell.rc.screen_length)
- else:
- page(output)
- print(message,)
-
- text_file = opts.T[0]
- if text_file:
- with open(text_file, 'w') as pfile:
- pfile.write(output)
- print('\n*** Profile printout saved to text file %s. %s' % (text_file,
- message))
-
- return_value = None
- if 'r' in opts:
- return_value = profile
-
- return return_value
-
-
-def _func_exec(stmt, ns):
- # helper for magic_memit, just a function proxy for the exec
- # statement
- exec(stmt, ns)
-
-# a timeit-style %memit magic for IPython
-def magic_memit(self, line=''):
- """Measure memory usage of a Python statement
-
- Usage, in line mode:
-      %memit [-r<R>t<T>] statement
-
- Options:
-    -r<R>: repeat the loop iteration <R> times and take the best result.
-    Default: 1
-
-    -t<T>: timeout after <T> seconds. Default: None
-
- Examples
- --------
- ::
-
- In [1]: import numpy as np
-
- In [2]: %memit np.zeros(1e7)
- maximum of 1: 76.402344 MB per loop
-
- In [3]: %memit np.ones(1e6)
- maximum of 1: 7.820312 MB per loop
-
- In [4]: %memit -r 10 np.empty(1e8)
- maximum of 10: 0.101562 MB per loop
-
- """
- opts, stmt = self.parse_options(line, 'r:t', posix=False, strict=False)
- repeat = int(getattr(opts, 'r', 1))
-    if repeat < 1:
-        repeat = 1
- timeout = int(getattr(opts, 't', 0))
- if timeout <= 0:
- timeout = None
-
- mem_usage = []
- for _ in range(repeat):
- tmp = memory_usage((_func_exec, (stmt, self.shell.user_ns)), timeout=timeout)
- mem_usage.extend(tmp)
-
- if mem_usage:
- print('maximum of %d: %f MB per loop' % (repeat, max(mem_usage)))
- else:
- print('ERROR: could not read memory usage, try with a lower interval or more iterations')
-
-
-def load_ipython_extension(ip):
- """This is called to load the module as an IPython extension."""
- ip.define_magic('mprun', magic_mprun)
- ip.define_magic('memit', magic_memit)
-
-
-def profile(func, stream=None):
- """
- Decorator that will run the function and print a line-by-line profile
- """
- def wrapper(*args, **kwargs):
- prof = LineProfiler()
- val = prof(func)(*args, **kwargs)
- show_results(prof, stream=stream)
- return val
- return wrapper
-
-
-if __name__ == '__main__':
- from optparse import OptionParser
- parser = OptionParser(usage=_CMD_USAGE, version=__version__)
- parser.disable_interspersed_args()
- parser.add_option("--pdb-mmem", dest="max_mem", metavar="MAXMEM",
- type="float", action="store",
- help="step into the debugger when memory exceeds MAXMEM")
- parser.add_option('--precision', dest="precision", type="int",
- action="store", default=3,
- help="precision of memory output in number of significant digits")
-
- if not sys.argv[1:]:
- parser.print_help()
- sys.exit(2)
-
- (options, args) = parser.parse_args()
- del sys.argv[0] # Hide "memory_profiler.py" from argument list
-
- prof = LineProfiler(max_mem=options.max_mem)
- __file__ = _find_script(args[0])
- try:
- if sys.version_info[0] < 3:
- import __builtin__
- __builtin__.__dict__['profile'] = prof
- ns = copy(locals())
- ns['profile'] = prof # shadow the profile decorator defined above
- execfile(__file__, ns, ns)
- else:
- import builtins
- builtins.__dict__['profile'] = prof
- ns = copy(locals())
- ns['profile'] = prof # shadow the profile decorator defined above
- exec(compile(open(__file__).read(), __file__, 'exec'),
- ns, copy(globals()))
- finally:
- show_results(prof, precision=options.precision)
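
Outside IPython, the deleted module's two entry points are the memory_usage() sampler and the profile() line-by-line decorator. A minimal sketch of both; the allocation size is arbitrary:

    from memory_profiler import memory_usage, profile

    def allocate():
        data = [0] * (10 ** 7)  # hold a few tens of MB briefly
        return sum(data)

    # Sample this process's memory every 0.1 s while allocate() runs.
    print(memory_usage((allocate, (), {}), interval=0.1))

    @profile
    def allocate_profiled():
        data = [0] * (10 ** 7)
        del data

    allocate_profiled()  # prints a line-by-line memory report
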
diff --git a/python-packages/mimeparse.py b/python-packages/mimeparse.py
deleted file mode 100755
index 4cb8eacaee..0000000000
--- a/python-packages/mimeparse.py
+++ /dev/null
@@ -1,168 +0,0 @@
-"""MIME-Type Parser
-
-This module provides basic functions for handling mime-types. It can handle
-matching mime-types against a list of media-ranges. See section 14.1 of the
-HTTP specification [RFC 2616] for a complete explanation.
-
- http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
-
-Contents:
- - parse_mime_type(): Parses a mime-type into its component parts.
- - parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q'
- quality parameter.
- - quality(): Determines the quality ('q') of a mime-type when
- compared against a list of media-ranges.
- - quality_parsed(): Just like quality() except the second parameter must be
- pre-parsed.
- - best_match(): Choose the mime-type with the highest quality ('q')
- from a list of candidates.
-"""
-from functools import reduce
-
-__version__ = '0.1.4'
-__author__ = 'Joe Gregorio'
-__email__ = 'joe@bitworking.org'
-__license__ = 'MIT License'
-__credits__ = ''
-
-
-def parse_mime_type(mime_type):
- """Parses a mime-type into its component parts.
-
- Carves up a mime-type and returns a tuple of the (type, subtype, params)
- where 'params' is a dictionary of all the parameters for the media range.
- For example, the media range 'application/xhtml;q=0.5' would get parsed
- into:
-
-    ('application', 'xhtml', {'q': '0.5'})
- """
- parts = mime_type.split(';')
- params = dict([tuple([s.strip() for s in param.split('=', 1)])
- for param in parts[1:]
- ])
- full_type = parts[0].strip()
- # Java URLConnection class sends an Accept header that includes a
- # single '*'. Turn it into a legal wildcard.
- if full_type == '*':
- full_type = '*/*'
- (type, subtype) = full_type.split('/')
-
- return (type.strip(), subtype.strip(), params)
-
-
-def parse_media_range(range):
- """Parse a media-range into its component parts.
-
- Carves up a media range and returns a tuple of the (type, subtype,
- params) where 'params' is a dictionary of all the parameters for the media
- range. For example, the media range 'application/*;q=0.5' would get parsed
- into:
-
-    ('application', '*', {'q': '0.5'})
-
- In addition this function also guarantees that there is a value for 'q'
- in the params dictionary, filling it in with a proper default if
- necessary.
- """
- (type, subtype, params) = parse_mime_type(range)
- if not 'q' in params or not params['q'] or \
- not float(params['q']) or float(params['q']) > 1\
- or float(params['q']) < 0:
- params['q'] = '1'
-
- return (type, subtype, params)
-
-
-def fitness_and_quality_parsed(mime_type, parsed_ranges):
- """Find the best match for a mime-type amongst parsed media-ranges.
-
- Find the best match for a given mime-type against a list of media_ranges
- that have already been parsed by parse_media_range(). Returns a tuple of
- the fitness value and the value of the 'q' quality parameter of the best
- match, or (-1, 0) if no match was found. Just as for quality_parsed(),
- 'parsed_ranges' must be a list of parsed media ranges.
- """
- best_fitness = -1
- best_fit_q = 0
- (target_type, target_subtype, target_params) =\
- parse_media_range(mime_type)
- for (type, subtype, params) in parsed_ranges:
- type_match = (type == target_type or
- type == '*' or
- target_type == '*')
- subtype_match = (subtype == target_subtype or
- subtype == '*' or
- target_subtype == '*')
- if type_match and subtype_match:
- param_matches = reduce(lambda x, y: x + y, [1 for (key, value) in
- list(target_params.items()) if key != 'q' and
- key in params and value == params[key]], 0)
- fitness = (type == target_type) and 100 or 0
- fitness += (subtype == target_subtype) and 10 or 0
- fitness += param_matches
- if fitness > best_fitness:
- best_fitness = fitness
- best_fit_q = params['q']
-
- return best_fitness, float(best_fit_q)
-
-
-def quality_parsed(mime_type, parsed_ranges):
- """Find the best match for a mime-type amongst parsed media-ranges.
-
- Find the best match for a given mime-type against a list of media_ranges
- that have already been parsed by parse_media_range(). Returns the 'q'
-    quality parameter of the best match, or 0 if no match was found. This
-    function behaves the same as quality() except that 'parsed_ranges' must be
-    a list of parsed media ranges. """
-
- return fitness_and_quality_parsed(mime_type, parsed_ranges)[1]
-
-
-def quality(mime_type, ranges):
- """Return the quality ('q') of a mime-type against a list of media-ranges.
-
- Returns the quality 'q' of a mime-type when compared against the
- media-ranges in ranges. For example:
-
- >>> quality('text/html','text/*;q=0.3, text/html;q=0.7,
- text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
- 0.7
-
- """
- parsed_ranges = [parse_media_range(r) for r in ranges.split(',')]
-
- return quality_parsed(mime_type, parsed_ranges)
-
-
-def best_match(supported, header):
- """Return mime-type with the highest quality ('q') from list of candidates.
-
- Takes a list of supported mime-types and finds the best match for all the
- media-ranges listed in header. The value of header must be a string that
- conforms to the format of the HTTP Accept: header. The value of 'supported'
- is a list of mime-types. The list of supported mime-types should be sorted
- in order of increasing desirability, in case of a situation where there is
- a tie.
-
- >>> best_match(['application/xbel+xml', 'text/xml'],
- 'text/*;q=0.5,*/*; q=0.1')
- 'text/xml'
- """
- split_header = _filter_blank(header.split(','))
- parsed_header = [parse_media_range(r) for r in split_header]
- weighted_matches = []
- pos = 0
- for mime_type in supported:
- weighted_matches.append((fitness_and_quality_parsed(mime_type,
- parsed_header), pos, mime_type))
- pos += 1
- weighted_matches.sort()
-
- return weighted_matches[-1][0][1] and weighted_matches[-1][2] or ''
-
-
-def _filter_blank(i):
- for s in i:
- if s.strip():
- yield s
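
Server-side content negotiation with this module reduces to one best_match() call over the Accept header, for example:

    import mimeparse

    supported = ['application/xbel+xml', 'application/xml', 'application/json']
    accept = 'application/json;q=0.9, application/xml;q=0.5, */*;q=0.1'
    print(mimeparse.best_match(supported, accept))  # -> 'application/json'
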
diff --git a/python-packages/oauth.py b/python-packages/oauth.py
deleted file mode 100644
index a7694c10d9..0000000000
--- a/python-packages/oauth.py
+++ /dev/null
@@ -1,666 +0,0 @@
-"""
-The MIT License
-
-Copyright (c) 2007 Leah Culver
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-"""
-
-import logging
-logger = logging.getLogger()
-
-import cgi
-import urllib
-import time
-import random
-import urlparse
-import hmac
-import binascii
-
-
-VERSION = '1.0' # Hi Blaine!
-HTTP_METHOD = 'GET'
-SIGNATURE_METHOD = 'PLAINTEXT'
-
-
-class OAuthError(RuntimeError):
- """Generic exception class."""
-    def __init__(self, message='OAuth error occurred.'):
- self.message = message
-
-def build_authenticate_header(realm=''):
- """Optional WWW-Authenticate header (401 error)"""
- return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
-
-def escape(s):
- """Escape a URL including any /."""
- return urllib.quote(s, safe='~')
-
-def _utf8_str(s):
- """Convert unicode to utf-8."""
- if isinstance(s, unicode):
- return s.encode("utf-8")
- else:
- return str(s)
-
-def generate_timestamp():
- """Get seconds since epoch (UTC)."""
- return int(time.time())
-
-def generate_nonce(length=8):
- """Generate pseudorandom number."""
- return ''.join([str(random.randint(0, 9)) for i in range(length)])
-
-def generate_verifier(length=8):
- """Generate pseudorandom number."""
- return ''.join([str(random.randint(0, 9)) for i in range(length)])
-
-
-class OAuthConsumer(object):
- """Consumer of OAuth authentication.
-
- OAuthConsumer is a data type that represents the identity of the Consumer
- via its shared secret with the Service Provider.
-
- """
- key = None
- secret = None
-
- def __init__(self, key, secret):
- self.key = key
- self.secret = secret
-
-
-class OAuthToken(object):
- """OAuthToken is a data type that represents an End User via either an access
- or request token.
-
- key -- the token
- secret -- the token secret
-
- """
- key = None
- secret = None
- callback = None
- callback_confirmed = None
- verifier = None
-
- def __init__(self, key, secret):
- self.key = key
- self.secret = secret
-
- def set_callback(self, callback):
- self.callback = callback
- self.callback_confirmed = 'true'
-
- def set_verifier(self, verifier=None):
- if verifier is not None:
- self.verifier = verifier
- else:
- self.verifier = generate_verifier()
-
- def get_callback_url(self):
- if self.callback and self.verifier:
- # Append the oauth_verifier.
- parts = urlparse.urlparse(self.callback)
- scheme, netloc, path, params, query, fragment = parts[:6]
- if query:
- query = '%s&oauth_verifier=%s' % (query, self.verifier)
- else:
- query = 'oauth_verifier=%s' % self.verifier
- return urlparse.urlunparse((scheme, netloc, path, params,
- query, fragment))
- return self.callback
-
- def to_string(self):
- data = {
- 'oauth_token': self.key,
- 'oauth_token_secret': self.secret,
- }
- if self.callback_confirmed is not None:
- data['oauth_callback_confirmed'] = self.callback_confirmed
- return urllib.urlencode(data)
-
- def from_string(s):
- """ Returns a token from something like:
- oauth_token_secret=xxx&oauth_token=xxx
- """
- params = cgi.parse_qs(s, keep_blank_values=False)
- key = params['oauth_token'][0]
- secret = params['oauth_token_secret'][0]
- token = OAuthToken(key, secret)
- try:
- token.callback_confirmed = params['oauth_callback_confirmed'][0]
- except KeyError:
- pass # 1.0, no callback confirmed.
- return token
- from_string = staticmethod(from_string)
-
- def __str__(self):
- return self.to_string()
-
-
-class OAuthRequest(object):
- """OAuthRequest represents the request and can be serialized.
-
- OAuth parameters:
- - oauth_consumer_key
- - oauth_token
- - oauth_signature_method
- - oauth_signature
- - oauth_timestamp
- - oauth_nonce
- - oauth_version
- - oauth_verifier
- ... any additional parameters, as defined by the Service Provider.
- """
- parameters = None # OAuth parameters.
- http_method = HTTP_METHOD
- http_url = None
- version = VERSION
-
- def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
- self.http_method = http_method
- self.http_url = http_url
- self.parameters = parameters or {}
-
- def set_parameter(self, parameter, value):
- self.parameters[parameter] = value
-
- def get_parameter(self, parameter):
- try:
- return self.parameters[parameter]
- except:
- raise OAuthError('Parameter not found: %s' % parameter)
-
- def _get_timestamp_nonce(self):
- return self.get_parameter('oauth_timestamp'), self.get_parameter(
- 'oauth_nonce')
-
- def get_nonoauth_parameters(self):
- """Get any non-OAuth parameters."""
- parameters = {}
- for k, v in self.parameters.iteritems():
- # Ignore oauth parameters.
- if k.find('oauth_') < 0:
- parameters[k] = v
- return parameters
-
- def to_header(self, realm=''):
- """Serialize as a header for an HTTPAuth request."""
- auth_header = 'OAuth realm="%s"' % realm
- # Add the oauth parameters.
- if self.parameters:
- for k, v in self.parameters.iteritems():
- if k[:6] == 'oauth_':
- auth_header += ', %s="%s"' % (k, escape(str(v)))
- return {'Authorization': auth_header}
-
- def to_postdata(self):
- """Serialize as post data for a POST request."""
- return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \
- for k, v in self.parameters.iteritems()])
-
- def to_url(self):
- """Serialize as a URL for a GET request."""
- return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
-
- def get_normalized_parameters(self):
- """Return a string that contains the parameters that must be signed."""
- params = self.parameters
- try:
- # Exclude the signature if it exists.
- del params['oauth_signature']
- except:
- pass
- # Escape key values before sorting.
- key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \
- for k,v in params.items()]
- # Sort lexicographically, first after key, then after value.
- key_values.sort()
- # Combine key value pairs into a string.
- return '&'.join(['%s=%s' % (k, v) for k, v in key_values])
-
- def get_normalized_http_method(self):
- """Uppercases the http method."""
- return self.http_method.upper()
-
- def get_normalized_http_url(self):
- """Parses the URL and rebuilds it to be scheme://host/path."""
- parts = urlparse.urlparse(self.http_url)
- scheme, netloc, path = parts[:3]
- # Exclude default port numbers.
- if scheme == 'http' and netloc[-3:] == ':80':
- netloc = netloc[:-3]
- elif scheme == 'https' and netloc[-4:] == ':443':
- netloc = netloc[:-4]
- return '%s://%s%s' % (scheme, netloc, path)
-
- def sign_request(self, signature_method, consumer, token):
- """Set the signature parameter to the result of build_signature."""
- # Set the signature method.
- self.set_parameter('oauth_signature_method',
- signature_method.get_name())
- # Set the signature.
- self.set_parameter('oauth_signature',
- self.build_signature(signature_method, consumer, token))
-
- def build_signature(self, signature_method, consumer, token):
- """Calls the build signature method within the signature method."""
- return signature_method.build_signature(self, consumer, token)
-
- def from_request(http_method, http_url, headers=None, parameters=None,
- query_string=None):
- """Combines multiple parameter sources."""
- if parameters is None:
- parameters = {}
-
- # Headers
- if headers and 'Authorization' in headers:
- auth_header = headers['Authorization']
- # Check that the authorization header is OAuth.
- if auth_header[:6] == 'OAuth ':
- auth_header = auth_header[6:]
- try:
- # Get the parameters from the header.
- header_params = OAuthRequest._split_header(auth_header)
- parameters.update(header_params)
- except:
- raise OAuthError('Unable to parse OAuth parameters from '
- 'Authorization header.')
-
- # GET or POST query string.
- if query_string:
- query_params = OAuthRequest._split_url_string(query_string)
- parameters.update(query_params)
-
- # URL parameters.
- param_str = urlparse.urlparse(http_url)[4] # query
- url_params = OAuthRequest._split_url_string(param_str)
- parameters.update(url_params)
-
- if parameters:
- return OAuthRequest(http_method, http_url, parameters)
-
- return None
- from_request = staticmethod(from_request)
-
- def from_consumer_and_token(oauth_consumer, token=None,
- callback=None, verifier=None, http_method=HTTP_METHOD,
- http_url=None, parameters=None):
- if not parameters:
- parameters = {}
-
- defaults = {
- 'oauth_consumer_key': oauth_consumer.key,
- 'oauth_timestamp': generate_timestamp(),
- 'oauth_nonce': generate_nonce(),
- 'oauth_version': OAuthRequest.version,
- }
-
- defaults.update(parameters)
- parameters = defaults
-
- if token:
- parameters['oauth_token'] = token.key
- if token.callback:
- parameters['oauth_callback'] = token.callback
- # 1.0a support for verifier.
- if verifier:
- parameters['oauth_verifier'] = verifier
- elif callback:
- # 1.0a support for callback in the request token request.
- parameters['oauth_callback'] = callback
-
- return OAuthRequest(http_method, http_url, parameters)
- from_consumer_and_token = staticmethod(from_consumer_and_token)
-
- def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
- http_url=None, parameters=None):
- if not parameters:
- parameters = {}
-
- parameters['oauth_token'] = token.key
-
- if callback:
- parameters['oauth_callback'] = callback
-
- return OAuthRequest(http_method, http_url, parameters)
- from_token_and_callback = staticmethod(from_token_and_callback)
-
- def _split_header(header):
- """Turn Authorization: header into parameters."""
- params = {}
- parts = header.split(',')
- for param in parts:
- # Ignore realm parameter.
- if param.find('realm') > -1:
- continue
- # Remove whitespace.
- param = param.strip()
- # Split key-value.
- param_parts = param.split('=', 1)
- # Remove quotes and unescape the value.
- params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
- return params
- _split_header = staticmethod(_split_header)
-
- def _split_url_string(param_str):
- """Turn URL string into parameters."""
- parameters = cgi.parse_qs(param_str, keep_blank_values=False)
- for k, v in parameters.iteritems():
- parameters[k] = urllib.unquote(v[0])
- return parameters
- _split_url_string = staticmethod(_split_url_string)
-
-class OAuthServer(object):
- """A worker to check the validity of a request against a data store."""
- timestamp_threshold = 300 # In seconds, five minutes.
- version = VERSION
- signature_methods = None
- data_store = None
-
- def __init__(self, data_store=None, signature_methods=None):
- self.data_store = data_store
- self.signature_methods = signature_methods or {}
-
- def set_data_store(self, data_store):
- self.data_store = data_store
-
- def get_data_store(self):
- return self.data_store
-
- def add_signature_method(self, signature_method):
- self.signature_methods[signature_method.get_name()] = signature_method
- return self.signature_methods
-
- def fetch_request_token(self, oauth_request):
- """Processes a request_token request and returns the
- request token on success.
- """
- try:
- # Get the request token for authorization.
- token = self._get_token(oauth_request, 'request')
- except OAuthError:
- # No token required for the initial token request.
- version = self._get_version(oauth_request)
- consumer = self._get_consumer(oauth_request)
- try:
- callback = self.get_callback(oauth_request)
- except OAuthError:
- callback = None # 1.0, no callback specified.
- self._check_signature(oauth_request, consumer, None)
- # Fetch a new token.
- token = self.data_store.fetch_request_token(consumer, callback)
- return token
-
- def fetch_access_token(self, oauth_request):
- logger.debug("!!! IN OAuthServer.fetch_access_token OAuth Params: %s"%oauth_request.parameters)
-
- """Processes an access_token request and returns the
- access token on success.
- """
- version = self._get_version(oauth_request)
- consumer = self._get_consumer(oauth_request)
- try:
- verifier = self._get_verifier(oauth_request)
- except OAuthError:
- verifier = None
- # Get the request token.
- token = self._get_token(oauth_request, 'request')
- self._check_signature(oauth_request, consumer, token)
- new_token = self.data_store.fetch_access_token(consumer, token, verifier)
- return new_token
-
- def verify_request(self, oauth_request):
- """Verifies an api call and checks all the parameters."""
- # -> consumer and token
- version = self._get_version(oauth_request)
- consumer = self._get_consumer(oauth_request)
- # Get the access token.
- token = self._get_token(oauth_request, 'access')
- self._check_signature(oauth_request, consumer, token)
- parameters = oauth_request.get_nonoauth_parameters()
- return consumer, token, parameters
-
- def authorize_token(self, token, user):
- """Authorize a request token."""
- return self.data_store.authorize_request_token(token, user)
-
- def get_callback(self, oauth_request):
- """Get the callback URL."""
- return oauth_request.get_parameter('oauth_callback')
-
- def build_authenticate_header(self, realm=''):
- """Optional support for the authenticate header."""
- return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
-
- def _get_version(self, oauth_request):
- """Verify the correct version request for this server."""
- try:
- version = oauth_request.get_parameter('oauth_version')
-        except OAuthError:
- version = VERSION
- if version and version != self.version:
- raise OAuthError('OAuth version %s not supported.' % str(version))
- return version
-
- def _get_signature_method(self, oauth_request):
- """Figure out the signature with some defaults."""
- try:
- signature_method = oauth_request.get_parameter(
- 'oauth_signature_method')
-        except OAuthError:
-            signature_method = SIGNATURE_METHOD
-        try:
-            # Get the signature method object.
-            signature_method = self.signature_methods[signature_method]
-        except KeyError:
-            signature_method_names = ', '.join(self.signature_methods.keys())
-            raise OAuthError('Signature method %s not supported; try one of '
-                'the following: %s' % (signature_method, signature_method_names))
-
- return signature_method
-
- def _get_consumer(self, oauth_request):
- consumer_key = oauth_request.get_parameter('oauth_consumer_key')
- consumer = self.data_store.lookup_consumer(consumer_key)
- if not consumer:
- raise OAuthError('Invalid consumer.')
- return consumer
-
- def _get_token(self, oauth_request, token_type='access'):
- """Try to find the token for the provided request token key."""
- token_field = oauth_request.get_parameter('oauth_token')
- token = self.data_store.lookup_token(token_type, token_field)
- if not token:
- raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
- return token
-
- def _get_verifier(self, oauth_request):
- return oauth_request.get_parameter('oauth_verifier')
-
- def _check_signature(self, oauth_request, consumer, token):
- timestamp, nonce = oauth_request._get_timestamp_nonce()
- self._check_timestamp(timestamp)
- self._check_nonce(consumer, token, nonce)
- signature_method = self._get_signature_method(oauth_request)
-        try:
-            signature = oauth_request.get_parameter('oauth_signature')
-        except OAuthError:
-            raise OAuthError('Missing signature.')
-        # Validate the signature.
-        valid_sig = signature_method.check_signature(oauth_request, consumer,
-            token, signature)
-        if not valid_sig:
-            key, base = signature_method.build_signature_base_string(
-                oauth_request, consumer, token)
-            logging.error("key: %s", key)
-            logging.error("base: %s", base)
-            raise OAuthError('Invalid signature. Expected signature base '
-                'string: %s' % base)
-
- def _check_timestamp(self, timestamp):
- """Verify that timestamp is recentish."""
- timestamp = int(timestamp)
- now = int(time.time())
- lapsed = abs(now - timestamp)
- if lapsed > self.timestamp_threshold:
-            raise OAuthError('Expired timestamp: given %d and now %d differ '
-                'by more than the threshold of %d seconds' %
-                (timestamp, now, self.timestamp_threshold))
-
- def _check_nonce(self, consumer, token, nonce):
- """Verify that the nonce is uniqueish."""
- nonce = self.data_store.lookup_nonce(consumer, token, nonce)
- if nonce:
- raise OAuthError('Nonce already used: %s' % str(nonce))
-
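-# Example (illustrative sketch): wiring an OAuthServer to verify an incoming
-# API call. `MyDataStore` is a hypothetical OAuthDataStore subclass; `headers`
-# and `params` come from the incoming HTTP request.
-#
-#   server = OAuthServer(data_store=MyDataStore())
-#   server.add_signature_method(OAuthSignatureMethod_HMAC_SHA1())
-#   oauth_request = OAuthRequest.from_request(
-#       'GET', 'http://example.com/api/resource',
-#       headers=headers, parameters=params)
-#   consumer, token, extra_params = server.verify_request(oauth_request)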
-
-class OAuthClient(object):
- """OAuthClient is a worker to attempt to execute a request."""
- consumer = None
- token = None
-
- def __init__(self, oauth_consumer, oauth_token):
- self.consumer = oauth_consumer
- self.token = oauth_token
-
- def get_consumer(self):
- return self.consumer
-
- def get_token(self):
- return self.token
-
- def fetch_request_token(self, oauth_request):
- """-> OAuthToken."""
- raise NotImplementedError
-
- def fetch_access_token(self, oauth_request):
- """-> OAuthToken."""
- raise NotImplementedError
-
- def access_resource(self, oauth_request):
- """-> Some protected resource."""
- raise NotImplementedError
-
-
-class OAuthDataStore(object):
- """A database abstraction used to lookup consumers and tokens."""
-
- def lookup_consumer(self, key):
- """-> OAuthConsumer."""
- raise NotImplementedError
-
-    def lookup_token(self, oauth_consumer, token_type, token_token):
-        """-> OAuthToken.
-
-        Note: OAuthServer._get_token actually calls this as
-        data_store.lookup_token(token_type, token), without the consumer
-        argument, so concrete stores must match that call.
-        """
-        raise NotImplementedError
-
- def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
- """-> OAuthToken."""
- raise NotImplementedError
-
- def fetch_request_token(self, oauth_consumer, oauth_callback):
- """-> OAuthToken."""
- raise NotImplementedError
-
- def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
- """-> OAuthToken."""
- raise NotImplementedError
-
- def authorize_request_token(self, oauth_token, user):
- """-> OAuthToken."""
- raise NotImplementedError
-
-
-class OAuthSignatureMethod(object):
- """A strategy class that implements a signature method."""
- def get_name(self):
- """-> str."""
- raise NotImplementedError
-
- def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
- """-> str key, str raw."""
- raise NotImplementedError
-
- def build_signature(self, oauth_request, oauth_consumer, oauth_token):
- """-> str."""
- raise NotImplementedError
-
- def check_signature(self, oauth_request, consumer, token, signature):
- built = self.build_signature(oauth_request, consumer, token)
- logging.info("Built signature: %s"%(built))
- return built == signature
-
-
-class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
-
- def get_name(self):
- return 'HMAC-SHA1'
-
- def build_signature_base_string(self, oauth_request, consumer, token):
- sig = (
- escape(oauth_request.get_normalized_http_method()),
- escape(oauth_request.get_normalized_http_url()),
- escape(oauth_request.get_normalized_parameters()),
- )
-
- key = '%s&' % escape(consumer.secret)
- if token:
- key += escape(token.secret)
- raw = '&'.join(sig)
- return key, raw
-
-    def build_signature(self, oauth_request, consumer, token):
-        """Calculates the HMAC-SHA1 signature for the request."""
- key, raw = self.build_signature_base_string(oauth_request, consumer,
- token)
-
- if isinstance(key, unicode):
- key = str(key)
-
-        # HMAC object.
-        try:
-            import hashlib  # Python 2.5+
-            hashed = hmac.new(key, raw, hashlib.sha1)
-        except ImportError:
-            import sha  # deprecated fallback for Python < 2.5
-            hashed = hmac.new(key, raw, sha)
-
- # Calculate the digest base 64.
- return binascii.b2a_base64(hashed.digest())[:-1]
-
-
-class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
-
- def get_name(self):
- return 'PLAINTEXT'
-
- def build_signature_base_string(self, oauth_request, consumer, token):
- """Concatenates the consumer key and secret."""
- sig = '%s&' % escape(consumer.secret)
- if token:
- sig = sig + escape(token.secret)
- return sig, sig
-
- def build_signature(self, oauth_request, consumer, token):
- key, raw = self.build_signature_base_string(oauth_request, consumer,
- token)
- return key
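-
-# Worked example (illustrative): for GET http://example.com/api?x=1 with
-# consumer secret "cs" and token secret "ts", HMAC-SHA1 signs the base string
-# "GET&http%3A%2F%2Fexample.com%2Fapi&x%3D1..." (the oauth_* parameters are
-# elided here) using the key "cs&ts", while PLAINTEXT simply uses "cs&ts"
-# itself as the signature.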
diff --git a/python-packages/pbkdf2.py b/python-packages/pbkdf2.py
deleted file mode 100644
index b1e717519c..0000000000
--- a/python-packages/pbkdf2.py
+++ /dev/null
@@ -1,313 +0,0 @@
-#!/usr/bin/python
-# -*- coding: ascii -*-
-###########################################################################
-# pbkdf2 - PKCS#5 v2.0 Password-Based Key Derivation
-#
-# Copyright (C) 2007-2011 Dwayne C. Litzenberger
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-# Country of origin: Canada
-#
-###########################################################################
-# Sample PBKDF2 usage:
-# from Crypto.Cipher import AES
-# from pbkdf2 import PBKDF2
-# import os
-#
-# salt = os.urandom(8) # 64-bit salt
-# key = PBKDF2("This passphrase is a secret.", salt).read(32) # 256-bit key
-# iv = os.urandom(16) # 128-bit IV
-# cipher = AES.new(key, AES.MODE_CBC, iv)
-# ...
-#
-# Sample crypt() usage:
-# from pbkdf2 import crypt
-# pwhash = crypt("secret")
-# alleged_pw = raw_input("Enter password: ")
-# if pwhash == crypt(alleged_pw, pwhash):
-# print "Password good"
-# else:
-# print "Invalid password"
-#
-###########################################################################
-
-__version__ = "1.3"
-__all__ = ['PBKDF2', 'crypt']
-
-from struct import pack
-from random import randint
-import string
-import sys
-
-try:
- # Use PyCrypto (if available).
- from Crypto.Hash import HMAC, SHA as SHA1
-except ImportError:
- # PyCrypto not available. Use the Python standard library.
- import hmac as HMAC
- try:
- from hashlib import sha1 as SHA1
- except ImportError:
- # hashlib not available. Use the old sha module.
- import sha as SHA1
-
-
-# Added (jamalex) to use M2Crypto's PBKDF2 for crypt, if available, for efficiency
-# TODO(jamalex): add tests, as per discussion at:
-# https://github.com/learningequality/ka-lite/pull/84
-try:
- import M2Crypto.EVP
- def pbkdf2(word, salt, iterations):
- return M2Crypto.EVP.pbkdf2(word, str(salt), iterations, 24)
-except ImportError:
- # if not available, just use the pure Python implementation
- def pbkdf2(word, salt, iterations):
- return PBKDF2(word, salt, iterations).read(24)
-
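-# Example (illustrative): either code path above should derive the same
-# 24-byte key:
-#
-#   key = pbkdf2("secret", "salt1234", 1000)
-#   assert len(key) == 24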
-
-#
-# Python 2.1 thru 3.2 compatibility
-#
-
-if sys.version_info[0] == 2:
- _0xffffffffL = long(1) << 32
- def isunicode(s):
- return isinstance(s, unicode)
- def isbytes(s):
- return isinstance(s, str)
- def isinteger(n):
- return isinstance(n, (int, long))
- def b(s):
- return s
- def binxor(a, b):
- return "".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)])
- def b64encode(data, chars="+/"):
- tt = string.maketrans("+/", chars)
- return data.encode('base64').replace("\n", "").translate(tt)
- from binascii import b2a_hex
-else:
- _0xffffffffL = 0xffffffff
- def isunicode(s):
- return isinstance(s, str)
- def isbytes(s):
- return isinstance(s, bytes)
- def isinteger(n):
- return isinstance(n, int)
- def callable(obj):
- return hasattr(obj, '__call__')
- def b(s):
- return s.encode("latin-1")
- def binxor(a, b):
- return bytes([x ^ y for (x, y) in zip(a, b)])
- from base64 import b64encode as _b64encode
- def b64encode(data, chars="+/"):
- if isunicode(chars):
- return _b64encode(data, chars.encode('utf-8')).decode('utf-8')
- else:
- return _b64encode(data, chars)
- from binascii import b2a_hex as _b2a_hex
- def b2a_hex(s):
- return _b2a_hex(s).decode('us-ascii')
- xrange = range
-
-class PBKDF2(object):
- """PBKDF2.py : PKCS#5 v2.0 Password-Based Key Derivation
-
- This implementation takes a passphrase and a salt (and optionally an
- iteration count, a digest module, and a MAC module) and provides a
- file-like object from which an arbitrarily-sized key can be read.
-
- If the passphrase and/or salt are unicode objects, they are encoded as
- UTF-8 before they are processed.
-
- The idea behind PBKDF2 is to derive a cryptographic key from a
- passphrase and a salt.
-
- PBKDF2 may also be used as a strong salted password hash. The
- 'crypt' function is provided for that purpose.
-
- Remember: Keys generated using PBKDF2 are only as strong as the
- passphrases they are derived from.
- """
-
- def __init__(self, passphrase, salt, iterations=1000,
- digestmodule=SHA1, macmodule=HMAC):
- self.__macmodule = macmodule
- self.__digestmodule = digestmodule
- self._setup(passphrase, salt, iterations, self._pseudorandom)
-
- def _pseudorandom(self, key, msg):
- """Pseudorandom function. e.g. HMAC-SHA1"""
- return self.__macmodule.new(key=key, msg=msg,
- digestmod=self.__digestmodule).digest()
-
- def read(self, bytes):
- """Read the specified number of key bytes."""
- if self.closed:
- raise ValueError("file-like object is closed")
-
- size = len(self.__buf)
- blocks = [self.__buf]
- i = self.__blockNum
- while size < bytes:
- i += 1
- if i > _0xffffffffL or i < 1:
- # We could return "" here, but
- raise OverflowError("derived key too long")
- block = self.__f(i)
- blocks.append(block)
- size += len(block)
- buf = b("").join(blocks)
- retval = buf[:bytes]
- self.__buf = buf[bytes:]
- self.__blockNum = i
- return retval
-
- def __f(self, i):
- # i must fit within 32 bits
- assert 1 <= i <= _0xffffffffL
- U = self.__prf(self.__passphrase, self.__salt + pack("!L", i))
- result = U
- for j in xrange(2, 1+self.__iterations):
- U = self.__prf(self.__passphrase, U)
- result = binxor(result, U)
- return result
-
- def hexread(self, octets):
- """Read the specified number of octets. Return them as hexadecimal.
-
- Note that len(obj.hexread(n)) == 2*n.
- """
- return b2a_hex(self.read(octets))
-
- def _setup(self, passphrase, salt, iterations, prf):
- # Sanity checks:
-
- # passphrase and salt must be str or unicode (in the latter
- # case, we convert to UTF-8)
- if isunicode(passphrase):
- passphrase = passphrase.encode("UTF-8")
- elif not isbytes(passphrase):
- raise TypeError("passphrase must be str or unicode")
- if isunicode(salt):
- salt = salt.encode("UTF-8")
- elif not isbytes(salt):
- raise TypeError("salt must be str or unicode")
-
- # iterations must be an integer >= 1
- if not isinteger(iterations):
- raise TypeError("iterations must be an integer")
- if iterations < 1:
- raise ValueError("iterations must be at least 1")
-
- # prf must be callable
- if not callable(prf):
- raise TypeError("prf must be callable")
-
- self.__passphrase = passphrase
- self.__salt = salt
- self.__iterations = iterations
- self.__prf = prf
- self.__blockNum = 0
- self.__buf = b("")
- self.closed = False
-
- def close(self):
- """Close the stream."""
- if not self.closed:
- del self.__passphrase
- del self.__salt
- del self.__iterations
- del self.__prf
- del self.__blockNum
- del self.__buf
- self.closed = True
-
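-# Sanity check (illustrative; assumes a Python with hashlib.pbkdf2_hmac,
-# i.e. 2.7.8+ or 3.4+): the pure-Python class should agree with the stdlib
-# implementation.
-#
-#   import hashlib
-#   expected = hashlib.pbkdf2_hmac('sha1', b'pass', b'salt', 1000, 24)
-#   assert PBKDF2('pass', 'salt', 1000).read(24) == expected
-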
-def crypt(word, salt=None, iterations=None):
- """PBKDF2-based unix crypt(3) replacement.
-
- The number of iterations specified in the salt overrides the 'iterations'
- parameter.
-
- The effective hash length is 192 bits.
- """
-
- # Generate a (pseudo-)random salt if the user hasn't provided one.
- if salt is None:
- salt = _makesalt()
-
- # salt must be a string or the us-ascii subset of unicode
- if isunicode(salt):
- salt = salt.encode('us-ascii').decode('us-ascii')
- elif isbytes(salt):
- salt = salt.decode('us-ascii')
- else:
- raise TypeError("salt must be a string")
-
- # word must be a string or unicode (in the latter case, we convert to UTF-8)
- if isunicode(word):
- word = word.encode("UTF-8")
- elif not isbytes(word):
- raise TypeError("word must be a string or unicode")
-
- # Try to extract the real salt and iteration count from the salt
- if salt.startswith("$p5k2$"):
- (iterations, salt, dummy) = salt.split("$")[2:5]
- if iterations == "":
- iterations = 400
- else:
- converted = int(iterations, 16)
- if iterations != "%x" % converted: # lowercase hex, minimum digits
- raise ValueError("Invalid salt")
- iterations = converted
- if not (iterations >= 1):
- raise ValueError("Invalid salt")
-
- # Make sure the salt matches the allowed character set
- allowed = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./"
- for ch in salt:
- if ch not in allowed:
- raise ValueError("Illegal character %r in salt" % (ch,))
-
- if iterations is None or iterations == 400:
- iterations = 400
- salt = "$p5k2$$" + salt
- else:
- salt = "$p5k2$%x$%s" % (iterations, salt)
-
- rawhash = pbkdf2(word, salt, iterations)
-
- return salt + "$" + b64encode(rawhash, "./")
-
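-# Example (illustrative): with the default 400 iterations the result looks
-# like "$p5k2$$SALT$HASH"; an explicit count is stored as lowercase hex, so
-# crypt("secret", iterations=8192) yields a "$p5k2$2000$..." prefix.
-# Verification reuses the stored hash as the salt argument:
-#
-#   stored = crypt("secret")
-#   assert crypt("secret", stored) == stored
-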
-# Add crypt as a static method of the PBKDF2 class
-# This makes it easier to do "from PBKDF2 import PBKDF2" and still use
-# crypt.
-PBKDF2.crypt = staticmethod(crypt)
-
-def _makesalt():
- """Return a 48-bit pseudorandom salt for crypt().
-
- This function is not suitable for generating cryptographic secrets.
- """
- binarysalt = b("").join([pack("@H", randint(0, 0xffff)) for i in range(3)])
- return b64encode(binarysalt, "./")
-
-# vim:set ts=4 sw=4 sts=4 expandtab:
diff --git a/python-packages/polib.py b/python-packages/polib.py
deleted file mode 100644
index 01857f9202..0000000000
--- a/python-packages/polib.py
+++ /dev/null
@@ -1,1766 +0,0 @@
-# -* coding: utf-8 -*-
-#
-# License: MIT (see LICENSE file provided)
-# vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4:
-
-"""
-**polib** allows you to manipulate, create and modify gettext files (pot, po
-and mo files). You can load existing files, iterate through their entries,
-add or modify entries, comments or metadata, etc., or create new po files
-from scratch.
-
-**polib** provides a simple and pythonic API via the :func:`~polib.pofile` and
-:func:`~polib.mofile` convenience functions.
-"""
-
-__author__ = 'David Jean Louis <izimobil@gmail.com>'
-__version__ = '1.0.3'
-__all__ = ['pofile', 'POFile', 'POEntry', 'mofile', 'MOFile', 'MOEntry',
- 'default_encoding', 'escape', 'unescape', 'detect_encoding', ]
-
-import array
-import codecs
-import os
-import re
-import struct
-import sys
-import textwrap
-
-
-# the default encoding to use when encoding cannot be detected
-default_encoding = 'utf-8'
-
-# python 2/3 compatibility helpers {{{
-
-
-if sys.version_info[:2] < (3, 0):
- PY3 = False
- text_type = unicode
-
- def b(s):
- return s
-
- def u(s):
- return unicode(s, "unicode_escape")
-
-else:
- PY3 = True
- text_type = str
-
- def b(s):
- return s.encode("latin-1")
-
- def u(s):
- return s
-# }}}
-# _pofile_or_mofile {{{
-
-
-def _pofile_or_mofile(f, type, **kwargs):
- """
- Internal function used by :func:`polib.pofile` and :func:`polib.mofile` to
- honor the DRY concept.
- """
- # get the file encoding
- enc = kwargs.get('encoding')
- if enc is None:
- enc = detect_encoding(f, type == 'mofile')
-
- # parse the file
- kls = type == 'pofile' and _POFileParser or _MOFileParser
- parser = kls(
- f,
- encoding=enc,
- check_for_duplicates=kwargs.get('check_for_duplicates', False),
- klass=kwargs.get('klass')
- )
- instance = parser.parse()
- instance.wrapwidth = kwargs.get('wrapwidth', 78)
- return instance
-# }}}
-# function pofile() {{{
-
-
-def pofile(pofile, **kwargs):
- """
- Convenience function that parses the po or pot file ``pofile`` and returns
- a :class:`~polib.POFile` instance.
-
- Arguments:
-
- ``pofile``
- string, full or relative path to the po/pot file or its content (data).
-
- ``wrapwidth``
- integer, the wrap width, only useful when the ``-w`` option was passed
- to xgettext (optional, default: ``78``).
-
- ``encoding``
- string, the encoding to use (e.g. "utf-8") (default: ``None``, the
- encoding will be auto-detected).
-
- ``check_for_duplicates``
- whether to check for duplicate entries when adding entries to the
- file (optional, default: ``False``).
-
- ``klass``
- class which is used to instantiate the return value (optional,
-        default: ``None``, the return value will be a :class:`~polib.POFile`
- instance).
- """
- return _pofile_or_mofile(pofile, 'pofile', **kwargs)
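-
-# Example (illustrative):
-#
-#   po = pofile('locale/fr/LC_MESSAGES/django.po')
-#   print(po.percent_translated())
-#   for entry in po.untranslated_entries():
-#       print(entry.msgid)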
-# }}}
-# function mofile() {{{
-
-
-def mofile(mofile, **kwargs):
- """
- Convenience function that parses the mo file ``mofile`` and returns a
- :class:`~polib.MOFile` instance.
-
- Arguments:
-
- ``mofile``
- string, full or relative path to the mo file or its content (data).
-
- ``wrapwidth``
- integer, the wrap width, only useful when the ``-w`` option was passed
- to xgettext to generate the po file that was used to format the mo file
- (optional, default: ``78``).
-
- ``encoding``
- string, the encoding to use (e.g. "utf-8") (default: ``None``, the
- encoding will be auto-detected).
-
- ``check_for_duplicates``
- whether to check for duplicate entries when adding entries to the
- file (optional, default: ``False``).
-
- ``klass``
- class which is used to instantiate the return value (optional,
-        default: ``None``, the return value will be a :class:`~polib.MOFile`
- instance).
- """
- return _pofile_or_mofile(mofile, 'mofile', **kwargs)
-# }}}
-# function detect_encoding() {{{
-
-
-def detect_encoding(file, binary_mode=False):
- """
- Try to detect the encoding used by the ``file``. The ``file`` argument can
- be a PO or MO file path or a string containing the contents of the file.
- If the encoding cannot be detected, the function will return the value of
- ``default_encoding``.
-
- Arguments:
-
- ``file``
- string, full or relative path to the po/mo file or its content.
-
- ``binary_mode``
- boolean, set this to True if ``file`` is a mo file.
- """
- PATTERN = r'"?Content-Type:.+? charset=([\w_\-:\.]+)'
- rxt = re.compile(u(PATTERN))
- rxb = re.compile(b(PATTERN))
-
- def charset_exists(charset):
- """Check whether ``charset`` is valid or not."""
- try:
- codecs.lookup(charset)
- except LookupError:
- return False
- return True
-
- try:
- is_file = os.path.exists(file)
- except (ValueError, UnicodeEncodeError):
- is_file = False
-
- if not is_file:
- match = rxt.search(file)
- if match:
- enc = match.group(1).strip()
- if charset_exists(enc):
- return enc
- else:
- # For PY3, always treat as binary
- if binary_mode or PY3:
- mode = 'rb'
- rx = rxb
- else:
- mode = 'r'
- rx = rxt
- f = open(file, mode)
- for l in f.readlines():
- match = rx.search(l)
- if match:
- f.close()
- enc = match.group(1).strip()
- if not isinstance(enc, text_type):
- enc = enc.decode('utf-8')
- if charset_exists(enc):
- return enc
- f.close()
- return default_encoding
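-
-# Example (illustrative):
-#
-#   detect_encoding('messages.po')             # e.g. 'utf-8'
-#   detect_encoding('messages.mo', True)       # binary mode for mo files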
-# }}}
-# function escape() {{{
-
-
-def escape(st):
- """
- Escapes the characters ``\\\\``, ``\\t``, ``\\n``, ``\\r`` and ``"`` in
- the given string ``st`` and returns it.
- """
- return st.replace('\\', r'\\')\
- .replace('\t', r'\t')\
- .replace('\r', r'\r')\
- .replace('\n', r'\n')\
- .replace('\"', r'\"')
-# }}}
-# function unescape() {{{
-
-
-def unescape(st):
- """
- Unescapes the characters ``\\\\``, ``\\t``, ``\\n``, ``\\r`` and ``"`` in
- the given string ``st`` and returns it.
- """
- def unescape_repl(m):
- m = m.group(1)
- if m == 'n':
- return '\n'
- if m == 't':
- return '\t'
- if m == 'r':
- return '\r'
- if m == '\\':
- return '\\'
- return m # handles escaped double quote
- return re.sub(r'\\(\\|n|t|r|")', unescape_repl, st)
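-
-# Round-trip example (illustrative):
-#
-#   escape('le "test"\n')      # -> 'le \\"test\\"\\n'
-#   unescape(escape(s)) == s   # holds for any string s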
-# }}}
-# class _BaseFile {{{
-
-
-class _BaseFile(list):
- """
- Common base class for the :class:`~polib.POFile` and :class:`~polib.MOFile`
-    classes. This class should **not** be instantiated directly.
- """
-
- def __init__(self, *args, **kwargs):
- """
- Constructor, accepts the following keyword arguments:
-
- ``pofile``
- string, the path to the po or mo file, or its content as a string.
-
- ``wrapwidth``
- integer, the wrap width, only useful when the ``-w`` option was
- passed to xgettext (optional, default: ``78``).
-
- ``encoding``
- string, the encoding to use, defaults to ``default_encoding``
- global variable (optional).
-
- ``check_for_duplicates``
- whether to check for duplicate entries when adding entries to the
- file, (optional, default: ``False``).
- """
- list.__init__(self)
- # the opened file handle
- pofile = kwargs.get('pofile', None)
- if pofile and os.path.exists(pofile):
- self.fpath = pofile
- else:
- self.fpath = kwargs.get('fpath')
- # the width at which lines should be wrapped
- self.wrapwidth = kwargs.get('wrapwidth', 78)
- # the file encoding
- self.encoding = kwargs.get('encoding', default_encoding)
- # whether to check for duplicate entries or not
- self.check_for_duplicates = kwargs.get('check_for_duplicates', False)
- # header
- self.header = ''
- # both po and mo files have metadata
- self.metadata = {}
- self.metadata_is_fuzzy = 0
-
- def __unicode__(self):
- """
- Returns the unicode representation of the file.
- """
- ret = []
- entries = [self.metadata_as_entry()] + \
- [e for e in self if not e.obsolete]
- for entry in entries:
- ret.append(entry.__unicode__(self.wrapwidth))
- for entry in self.obsolete_entries():
- ret.append(entry.__unicode__(self.wrapwidth))
- ret = u('\n').join(ret)
-
- assert isinstance(ret, text_type)
- #if type(ret) != text_type:
- # return unicode(ret, self.encoding)
- return ret
-
- if PY3:
- def __str__(self):
- return self.__unicode__()
- else:
- def __str__(self):
- """
- Returns the string representation of the file.
- """
- return unicode(self).encode(self.encoding)
-
- def __contains__(self, entry):
- """
-        Overridden ``list`` method to implement the membership test (``in``
-        and ``not in``).
-        The method considers that an entry is in the file if it finds an entry
-        that has the same msgid (the test is **case sensitive**) and the same
-        msgctxt (or none for both entries).
-
- Argument:
-
- ``entry``
- an instance of :class:`~polib._BaseEntry`.
- """
- return self.find(entry.msgid, by='msgid', msgctxt=entry.msgctxt) \
- is not None
-
- def __eq__(self, other):
- return str(self) == str(other)
-
- def append(self, entry):
- """
-        Overridden method that checks for duplicate entries: if a user tries
-        to add an entry that is already in the file, the method raises a
-        ``ValueError`` exception.
-
- Argument:
-
- ``entry``
- an instance of :class:`~polib._BaseEntry`.
- """
- if self.check_for_duplicates and entry in self:
- raise ValueError('Entry "%s" already exists' % entry.msgid)
- super(_BaseFile, self).append(entry)
-
- def insert(self, index, entry):
- """
-        Overridden method that checks for duplicate entries: if a user tries
-        to add an entry that is already in the file, the method raises a
-        ``ValueError`` exception.
-
- Arguments:
-
- ``index``
- index at which the entry should be inserted.
-
- ``entry``
- an instance of :class:`~polib._BaseEntry`.
- """
- if self.check_for_duplicates and entry in self:
- raise ValueError('Entry "%s" already exists' % entry.msgid)
- super(_BaseFile, self).insert(index, entry)
-
- def metadata_as_entry(self):
- """
-        Returns the file metadata as a :class:`~polib.POEntry` instance.
- """
- e = POEntry(msgid='')
- mdata = self.ordered_metadata()
- if mdata:
- strs = []
- for name, value in mdata:
- # Strip whitespace off each line in a multi-line entry
- strs.append('%s: %s' % (name, value))
- e.msgstr = '\n'.join(strs) + '\n'
- if self.metadata_is_fuzzy:
- e.flags.append('fuzzy')
- return e
-
- def save(self, fpath=None, repr_method='__unicode__'):
- """
- Saves the po file to ``fpath``.
- If it is an existing file and no ``fpath`` is provided, then the
- existing file is rewritten with the modified data.
-
- Keyword arguments:
-
- ``fpath``
- string, full or relative path to the file.
-
- ``repr_method``
- string, the method to use for output.
- """
- if self.fpath is None and fpath is None:
- raise IOError('You must provide a file path to save() method')
- contents = getattr(self, repr_method)()
- if fpath is None:
- fpath = self.fpath
- if repr_method == 'to_binary':
- fhandle = open(fpath, 'wb')
- else:
- fhandle = codecs.open(fpath, 'w', self.encoding)
- if not isinstance(contents, text_type):
- contents = contents.decode(self.encoding)
- fhandle.write(contents)
- fhandle.close()
- # set the file path if not set
- if self.fpath is None and fpath:
- self.fpath = fpath
- self._remove_extra_msgid()
-
- def find(self, st, by='msgid', include_obsolete_entries=False,
- msgctxt=False):
- """
- Find the entry which msgid (or property identified by the ``by``
- argument) matches the string ``st``.
-
- Keyword arguments:
-
- ``st``
- string, the string to search for.
-
- ``by``
- string, the property to use for comparison (default: ``msgid``).
-
- ``include_obsolete_entries``
- boolean, whether to also search in entries that are obsolete.
-
- ``msgctxt``
- string, allows to specify a specific message context for the
- search.
- """
- if include_obsolete_entries:
- entries = self[:]
- else:
- entries = [e for e in self if not e.obsolete]
- for e in entries:
- if getattr(e, by) == st:
- if msgctxt is not False and e.msgctxt != msgctxt:
- continue
- return e
- return None
-
- def ordered_metadata(self):
- """
- Convenience method that returns an ordered version of the metadata
-        dictionary. The return value is a list of tuples (metadata name,
-        metadata value).
- """
- # copy the dict first
- metadata = self.metadata.copy()
- data_order = [
- 'Project-Id-Version',
- 'Report-Msgid-Bugs-To',
- 'POT-Creation-Date',
- 'PO-Revision-Date',
- 'Last-Translator',
- 'Language-Team',
- 'MIME-Version',
- 'Content-Type',
- 'Content-Transfer-Encoding'
- ]
- ordered_data = []
- for data in data_order:
- try:
- value = metadata.pop(data)
- ordered_data.append((data, value))
- except KeyError:
- pass
- # the rest of the metadata will be alphabetically ordered since there
- # are no specs for this AFAIK
- for data in sorted(metadata.keys()):
- value = metadata[data]
- ordered_data.append((data, value))
- return ordered_data
-
- def to_binary(self):
- """
- Return the binary representation of the file.
- """
- offsets = []
- entries = self.translated_entries()
-
-        # the keys are sorted in the .mo file (msgfmt sorts on msgctxt when
-        # present, otherwise on msgid)
-        entries.sort(key=lambda o: o.msgctxt or o.msgid)
-        # add metadata entry
- mentry = self.metadata_as_entry()
- #mentry.msgstr = mentry.msgstr.replace('\\n', '').lstrip()
- entries = [mentry] + entries
- entries_len = len(entries)
- ids, strs = b(''), b('')
- for e in entries:
- # For each string, we need size and file offset. Each string is
- # NUL terminated; the NUL does not count into the size.
- msgid = b('')
- if e.msgctxt:
- # Contexts are stored by storing the concatenation of the
- # context, a byte, and the original string
- msgid = self._encode(e.msgctxt + '\4')
- if e.msgid_plural:
- msgstr = []
- for index in sorted(e.msgstr_plural.keys()):
- msgstr.append(e.msgstr_plural[index])
- msgid += self._encode(e.msgid + '\0' + e.msgid_plural)
- msgstr = self._encode('\0'.join(msgstr))
- else:
- msgid += self._encode(e.msgid)
- msgstr = self._encode(e.msgstr)
- offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
- ids += msgid + b('\0')
- strs += msgstr + b('\0')
-
- # The header is 7 32-bit unsigned integers.
- keystart = 7 * 4 + 16 * entries_len
- # and the values start after the keys
- valuestart = keystart + len(ids)
- koffsets = []
- voffsets = []
- # The string table first has the list of keys, then the list of values.
- # Each entry has first the size of the string, then the file offset.
- for o1, l1, o2, l2 in offsets:
- koffsets += [l1, o1 + keystart]
- voffsets += [l2, o2 + valuestart]
- offsets = koffsets + voffsets
- # check endianness for magic number
-        if struct.pack('@h', 1) == struct.pack('<h', 1):
-            magic_number = MOFile.LITTLE_ENDIAN
-        else:
-            magic_number = MOFile.BIG_ENDIAN
-
-        output = struct.pack(
-            "Iiiiiii",
-            # Magic number
-            magic_number,
-            # Version
-            0,
-            # number of entries
-            entries_len,
-            # start of key index
-            7 * 4,
-            # start of value index
-            7 * 4 + entries_len * 8,
-            # size and offset of hash table, we don't use hash tables
-            0, keystart
-        )
-        if PY3 and sys.version_info.minor > 1:  # python 3.2 or superior
- output += array.array("i", offsets).tobytes()
- else:
- output += array.array("i", offsets).tostring()
- output += ids
- output += strs
- return output
-
-    # polib currently creates an empty header entry automatically, with no
-    # way to control its contents, so we strip that header back out of the
-    # saved file. TODO for Aron: fix polib.py itself.
- def _remove_extra_msgid(self):
- pofilename = self.fpath
- header_to_remove = '# \nmsgid ""\nmsgstr ""\n\n'
- with open(pofilename, 'r') as pofile:
- polines = pofile.read()
- if polines.startswith(header_to_remove):
- polines = polines[len(header_to_remove):]
- with open(pofilename, 'w') as fp:
- fp.write(polines)
-
- def _encode(self, mixed):
- """
-        Encodes the given ``mixed`` argument with the file encoding if and
-        only if it's a unicode string, and returns the encoded string.
- """
- if isinstance(mixed, text_type):
- mixed = mixed.encode(self.encoding)
- return mixed
-# }}}
-# class POFile {{{
-
-
-class POFile(_BaseFile):
- """
- Po (or Pot) file reader/writer.
- This class inherits the :class:`~polib._BaseFile` class and, by extension,
- the python ``list`` type.
- """
-
- def __unicode__(self):
- """
- Returns the unicode representation of the po file.
- """
- ret, headers = '', self.header.split('\n')
- for header in headers:
- if header[:1] in [',', ':']:
- ret += '#%s\n' % header
- else:
- ret += '# %s\n' % header
-
- if not isinstance(ret, text_type):
- ret = ret.decode(self.encoding)
-
- return ret + _BaseFile.__unicode__(self)
-
- def save_as_mofile(self, fpath):
- """
- Saves the binary representation of the file to given ``fpath``.
-
- Keyword argument:
-
- ``fpath``
- string, full or relative path to the mo file.
- """
- _BaseFile.save(self, fpath, 'to_binary')
-
- def percent_translated(self):
- """
- Convenience method that returns the percentage of translated
- messages.
- """
- total = len([e for e in self if not e.obsolete])
- if total == 0:
- return 100
- translated = len(self.translated_entries())
- return int((100.00 / float(total)) * translated)
-
- def translated_entries(self):
- """
- Convenience method that returns the list of translated entries.
- """
- return [e for e in self if e.translated()]
-
- def untranslated_entries(self):
- """
- Convenience method that returns the list of untranslated entries.
- """
- return [e for e in self if not e.translated() and not e.obsolete
- and not 'fuzzy' in e.flags]
-
- def fuzzy_entries(self):
- """
- Convenience method that returns the list of fuzzy entries.
- """
- return [e for e in self if 'fuzzy' in e.flags]
-
- def obsolete_entries(self):
- """
- Convenience method that returns the list of obsolete entries.
- """
- return [e for e in self if e.obsolete]
-
- def merge(self, refpot):
- """
- Convenience method that merges the current pofile with the pot file
- provided. It behaves exactly as the gettext msgmerge utility:
-
-        * comments of this file will be preserved, but extracted comments and
-          occurrences will be discarded;
-        * any translations or comments in the pot file will be discarded,
-          however, dot comments and file positions will be preserved;
-        * the fuzzy flags are preserved.
-
- Keyword argument:
-
- ``refpot``
- object POFile, the reference catalog.
- """
- # Store entries in dict/set for faster access
- self_entries = dict((entry.msgid, entry) for entry in self)
- refpot_msgids = set(entry.msgid for entry in refpot)
- # Merge entries that are in the refpot
- for entry in refpot:
- e = self_entries.get(entry.msgid)
- if e is None:
- e = POEntry()
- self.append(e)
- e.merge(entry)
- # ok, now we must "obsolete" entries that are not in the refpot anymore
- for entry in self:
- if entry.msgid not in refpot_msgids:
- entry.obsolete = True
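-
-# Example (illustrative): refreshing a catalog against a new template, as
-# msgmerge would:
-#
-#   po = pofile('fr.po')
-#   po.merge(pofile('messages.pot'))
-#   po.save()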
-# }}}
-# class MOFile {{{
-
-
-class MOFile(_BaseFile):
- """
- Mo file reader/writer.
- This class inherits the :class:`~polib._BaseFile` class and, by
- extension, the python ``list`` type.
- """
- BIG_ENDIAN = 0xde120495
- LITTLE_ENDIAN = 0x950412de
-
- def __init__(self, *args, **kwargs):
- """
- Constructor, accepts all keywords arguments accepted by
- :class:`~polib._BaseFile` class.
- """
- _BaseFile.__init__(self, *args, **kwargs)
- self.magic_number = None
- self.version = 0
-
- def save_as_pofile(self, fpath):
- """
- Saves the mofile as a pofile to ``fpath``.
-
- Keyword argument:
-
- ``fpath``
- string, full or relative path to the file.
- """
- _BaseFile.save(self, fpath)
-
- def save(self, fpath=None):
- """
- Saves the mofile to ``fpath``.
-
- Keyword argument:
-
- ``fpath``
- string, full or relative path to the file.
- """
- _BaseFile.save(self, fpath, 'to_binary')
-
- def percent_translated(self):
- """
- Convenience method to keep the same interface with POFile instances.
- """
- return 100
-
- def translated_entries(self):
- """
- Convenience method to keep the same interface with POFile instances.
- """
- return self
-
- def untranslated_entries(self):
- """
- Convenience method to keep the same interface with POFile instances.
- """
- return []
-
- def fuzzy_entries(self):
- """
- Convenience method to keep the same interface with POFile instances.
- """
- return []
-
- def obsolete_entries(self):
- """
- Convenience method to keep the same interface with POFile instances.
- """
- return []
-# }}}
-# class _BaseEntry {{{
-
-
-class _BaseEntry(object):
- """
- Base class for :class:`~polib.POEntry` and :class:`~polib.MOEntry` classes.
-    This class should **not** be instantiated directly.
- """
-
- def __init__(self, *args, **kwargs):
- """
- Constructor, accepts the following keyword arguments:
-
- ``msgid``
- string, the entry msgid.
-
- ``msgstr``
- string, the entry msgstr.
-
- ``msgid_plural``
- string, the entry msgid_plural.
-
- ``msgstr_plural``
-            dict, the entry msgstr_plural strings, keyed by plural index.
-
- ``msgctxt``
- string, the entry context (msgctxt).
-
- ``obsolete``
- bool, whether the entry is "obsolete" or not.
-
- ``encoding``
- string, the encoding to use, defaults to ``default_encoding``
- global variable (optional).
- """
- self.msgid = kwargs.get('msgid', '')
- self.msgstr = kwargs.get('msgstr', '')
- self.msgid_plural = kwargs.get('msgid_plural', '')
- self.msgstr_plural = kwargs.get('msgstr_plural', {})
- self.msgctxt = kwargs.get('msgctxt', None)
- self.obsolete = kwargs.get('obsolete', False)
- self.encoding = kwargs.get('encoding', default_encoding)
-
- def __unicode__(self, wrapwidth=78):
- """
- Returns the unicode representation of the entry.
- """
- if self.obsolete:
- delflag = '#~ '
- else:
- delflag = ''
- ret = []
- # write the msgctxt if any
- if self.msgctxt is not None:
- ret += self._str_field("msgctxt", delflag, "", self.msgctxt,
- wrapwidth)
- # write the msgid
- ret += self._str_field("msgid", delflag, "", self.msgid, wrapwidth)
- # write the msgid_plural if any
- if self.msgid_plural:
- ret += self._str_field("msgid_plural", delflag, "",
- self.msgid_plural, wrapwidth)
- if self.msgstr_plural:
- # write the msgstr_plural if any
- msgstrs = self.msgstr_plural
- keys = list(msgstrs)
- keys.sort()
- for index in keys:
- msgstr = msgstrs[index]
- plural_index = '[%s]' % index
- ret += self._str_field("msgstr", delflag, plural_index, msgstr,
- wrapwidth)
- else:
- # otherwise write the msgstr
- ret += self._str_field("msgstr", delflag, "", self.msgstr,
- wrapwidth)
- ret.append('')
- ret = u('\n').join(ret)
- return ret
-
- if PY3:
- def __str__(self):
- return self.__unicode__()
- else:
- def __str__(self):
- """
- Returns the string representation of the entry.
- """
- return unicode(self).encode(self.encoding)
-
- def __eq__(self, other):
- return str(self) == str(other)
-
- def _str_field(self, fieldname, delflag, plural_index, field,
- wrapwidth=78):
- lines = field.splitlines(True)
- if len(lines) > 1:
- lines = [''] + lines # start with initial empty line
- else:
- escaped_field = escape(field)
- specialchars_count = 0
- for c in ['\\', '\n', '\r', '\t', '"']:
- specialchars_count += field.count(c)
- # comparison must take into account fieldname length + one space
- # + 2 quotes (eg. msgid "")
- flength = len(fieldname) + 3
- if plural_index:
- flength += len(plural_index)
- real_wrapwidth = wrapwidth - flength + specialchars_count
- if wrapwidth > 0 and len(field) > real_wrapwidth:
- # Wrap the line but take field name into account
- lines = [''] + [unescape(item) for item in wrap(
- escaped_field,
- wrapwidth - 2, # 2 for quotes ""
- drop_whitespace=False,
- break_long_words=False
- )]
- else:
- lines = [field]
- if fieldname.startswith('previous_'):
- # quick and dirty trick to get the real field name
- fieldname = fieldname[9:]
-
- ret = ['%s%s%s "%s"' % (delflag, fieldname, plural_index,
- escape(lines.pop(0)))]
- for mstr in lines:
- ret.append('%s"%s"' % (delflag, escape(mstr)))
- return ret
-# }}}
-# class POEntry {{{
-
-
-class POEntry(_BaseEntry):
- """
- Represents a po file entry.
- """
-
- def __init__(self, *args, **kwargs):
- """
- Constructor, accepts the following keyword arguments:
-
- ``comment``
- string, the entry comment.
-
- ``tcomment``
- string, the entry translator comment.
-
- ``occurrences``
- list, the entry occurrences.
-
- ``flags``
- list, the entry flags.
-
- ``previous_msgctxt``
- string, the entry previous context.
-
- ``previous_msgid``
- string, the entry previous msgid.
-
- ``previous_msgid_plural``
- string, the entry previous msgid_plural.
- """
- _BaseEntry.__init__(self, *args, **kwargs)
- self.comment = kwargs.get('comment', '')
- self.tcomment = kwargs.get('tcomment', '')
- self.occurrences = kwargs.get('occurrences', [])
- self.flags = kwargs.get('flags', [])
- self.previous_msgctxt = kwargs.get('previous_msgctxt', None)
- self.previous_msgid = kwargs.get('previous_msgid', None)
- self.previous_msgid_plural = kwargs.get('previous_msgid_plural', None)
-
- def __unicode__(self, wrapwidth=78):
- """
- Returns the unicode representation of the entry.
- """
- if self.obsolete:
- return _BaseEntry.__unicode__(self, wrapwidth)
-
- ret = []
- # comments first, if any (with text wrapping as xgettext does)
- comments = [('comment', '#. '), ('tcomment', '# ')]
- for c in comments:
- val = getattr(self, c[0])
- if val:
- for comment in val.split('\n'):
- if wrapwidth > 0 and len(comment) + len(c[1]) > wrapwidth:
- ret += wrap(
- comment,
- wrapwidth,
- initial_indent=c[1],
- subsequent_indent=c[1],
- break_long_words=False
- )
- else:
- ret.append('%s%s' % (c[1], comment))
-
- # occurrences (with text wrapping as xgettext does)
- if self.occurrences:
- filelist = []
- for fpath, lineno in self.occurrences:
- if lineno:
- filelist.append('%s:%s' % (fpath, lineno))
- else:
- filelist.append(fpath)
- filestr = ' '.join(filelist)
- if wrapwidth > 0 and len(filestr) + 3 > wrapwidth:
-                # textwrap splits words that contain hyphens; this is not
-                # what we want for filenames, so the dirty hack is to
-                # temporarily replace hyphens with a char that a filename
-                # cannot contain, like "*"
- ret += [l.replace('*', '-') for l in wrap(
- filestr.replace('-', '*'),
- wrapwidth,
- initial_indent='#: ',
- subsequent_indent='#: ',
- break_long_words=False
- )]
- else:
- ret.append('#: ' + filestr)
-
- # flags (TODO: wrapping ?)
- if self.flags:
- ret.append('#, %s' % ', '.join(self.flags))
-
- # previous context and previous msgid/msgid_plural
- fields = ['previous_msgctxt', 'previous_msgid',
- 'previous_msgid_plural']
- for f in fields:
- val = getattr(self, f)
- if val:
- ret += self._str_field(f, "#| ", "", val, wrapwidth)
-
- ret.append(_BaseEntry.__unicode__(self, wrapwidth))
- ret = u('\n').join(ret)
-
- assert isinstance(ret, text_type)
- #if type(ret) != types.UnicodeType:
- # return unicode(ret, self.encoding)
- return ret
-
- def __cmp__(self, other):
- """
- Called by comparison operations if rich comparison is not defined.
- """
-
- # First: Obsolete test
- if self.obsolete != other.obsolete:
- if self.obsolete:
- return -1
- else:
- return 1
- # Work on a copy to protect original
- occ1 = sorted(self.occurrences[:])
- occ2 = sorted(other.occurrences[:])
- pos = 0
- for entry1 in occ1:
- try:
- entry2 = occ2[pos]
- except IndexError:
- return 1
- pos = pos + 1
- if entry1[0] != entry2[0]:
- if entry1[0] > entry2[0]:
- return 1
- else:
- return -1
- if entry1[1] != entry2[1]:
- if entry1[1] > entry2[1]:
- return 1
- else:
- return -1
- # Finally: Compare message ID
- if self.msgid > other.msgid:
- return 1
- elif self.msgid < other.msgid:
- return -1
- return 0
-
- def __gt__(self, other):
- return self.__cmp__(other) > 0
-
- def __lt__(self, other):
- return self.__cmp__(other) < 0
-
- def __ge__(self, other):
- return self.__cmp__(other) >= 0
-
- def __le__(self, other):
- return self.__cmp__(other) <= 0
-
- def __eq__(self, other):
- return self.__cmp__(other) == 0
-
- def __ne__(self, other):
- return self.__cmp__(other) != 0
-
- def translated(self):
- """
- Returns ``True`` if the entry has been translated or ``False``
- otherwise.
- """
- if self.obsolete or 'fuzzy' in self.flags:
- return False
- if self.msgstr != '':
- return True
- if self.msgstr_plural:
- for pos in self.msgstr_plural:
- if self.msgstr_plural[pos] == '':
- return False
- return True
- return False
-
- def merge(self, other):
- """
- Merge the current entry with the given pot entry.
- """
- self.msgid = other.msgid
- self.msgctxt = other.msgctxt
- self.occurrences = other.occurrences
- self.comment = other.comment
- self.msgstr = other.msgstr if other.msgstr else self.msgstr
- fuzzy = 'fuzzy' in self.flags
- self.flags = other.flags[:] # clone flags
- if fuzzy:
- self.flags.append('fuzzy')
- self.msgid_plural = other.msgid_plural
- self.obsolete = other.obsolete
- self.previous_msgctxt = other.previous_msgctxt
- self.previous_msgid = other.previous_msgid
- self.previous_msgid_plural = other.previous_msgid_plural
- if other.msgstr_plural:
- for pos in other.msgstr_plural:
- try:
- # keep existing translation at pos if any
- self.msgstr_plural[pos]
- except KeyError:
- self.msgstr_plural[pos] = ''
-# }}}
-# class MOEntry {{{
-
-
-class MOEntry(_BaseEntry):
- """
- Represents a mo file entry.
- """
- pass
-# }}}
-# class _POFileParser {{{
-
-
-class _POFileParser(object):
- """
-    A finite state machine that parses the po file format efficiently
-    and correctly.
- """
-
- def __init__(self, pofile, *args, **kwargs):
- """
- Constructor.
-
- Keyword arguments:
-
- ``pofile``
- string, path to the po file or its content
-
- ``encoding``
- string, the encoding to use, defaults to ``default_encoding``
- global variable (optional).
-
- ``check_for_duplicates``
- whether to check for duplicate entries when adding entries to the
- file (optional, default: ``False``).
- """
- enc = kwargs.get('encoding', default_encoding)
- if os.path.exists(pofile):
- try:
- self.fhandle = codecs.open(pofile, 'rU', enc)
- except LookupError:
- enc = default_encoding
- self.fhandle = codecs.open(pofile, 'rU', enc)
- else:
- self.fhandle = pofile.splitlines()
-
- klass = kwargs.get('klass')
- if klass is None:
- klass = POFile
- self.instance = klass(
- pofile=pofile,
- encoding=enc,
- check_for_duplicates=kwargs.get('check_for_duplicates', False)
- )
- self.transitions = {}
- self.current_entry = POEntry()
- self.current_state = 'ST'
- self.current_token = None
- # two memo flags used in handlers
- self.msgstr_index = 0
- self.entry_obsolete = 0
- # Configure the state machine, by adding transitions.
- # Signification of symbols:
- # * ST: Beginning of the file (start)
- # * HE: Header
- # * TC: a translation comment
- # * GC: a generated comment
-        # * OC: a file/line occurrence
- # * FL: a flags line
- # * CT: a message context
- # * PC: a previous msgctxt
- # * PM: a previous msgid
- # * PP: a previous msgid_plural
- # * MI: a msgid
- # * MP: a msgid plural
- # * MS: a msgstr
- # * MX: a msgstr plural
- # * MC: a msgid or msgstr continuation line
- all = ['ST', 'HE', 'GC', 'OC', 'FL', 'CT', 'PC', 'PM', 'PP', 'TC',
- 'MS', 'MP', 'MX', 'MI']
-
- self.add('TC', ['ST', 'HE'], 'HE')
- self.add('TC', ['GC', 'OC', 'FL', 'TC', 'PC', 'PM', 'PP', 'MS',
- 'MP', 'MX', 'MI'], 'TC')
- self.add('GC', all, 'GC')
- self.add('OC', all, 'OC')
- self.add('FL', all, 'FL')
- self.add('PC', all, 'PC')
- self.add('PM', all, 'PM')
- self.add('PP', all, 'PP')
- self.add('CT', ['ST', 'HE', 'GC', 'OC', 'FL', 'TC', 'PC', 'PM',
- 'PP', 'MS', 'MX'], 'CT')
- self.add('MI', ['ST', 'HE', 'GC', 'OC', 'FL', 'CT', 'TC', 'PC',
- 'PM', 'PP', 'MS', 'MX'], 'MI')
- self.add('MP', ['TC', 'GC', 'PC', 'PM', 'PP', 'MI'], 'MP')
- self.add('MS', ['MI', 'MP', 'TC'], 'MS')
- self.add('MX', ['MI', 'MX', 'MP', 'TC'], 'MX')
- self.add('MC', ['CT', 'MI', 'MP', 'MS', 'MX', 'PM', 'PP', 'PC'], 'MC')
-
- def parse(self):
- """
- Run the state machine, parse the file line by line and call process()
- with the current matched symbol.
- """
- i = 0
-
- keywords = {
- 'msgctxt': 'CT',
- 'msgid': 'MI',
- 'msgstr': 'MS',
- 'msgid_plural': 'MP',
- }
- prev_keywords = {
- 'msgid_plural': 'PP',
- 'msgid': 'PM',
- 'msgctxt': 'PC',
- }
-
- for line in self.fhandle:
- i += 1
- line = line.strip()
- if line == '':
- continue
-
- tokens = line.split(None, 2)
- nb_tokens = len(tokens)
-
- if tokens[0] == '#~|':
- continue
-
- if tokens[0] == '#~' and nb_tokens > 1:
- line = line[3:].strip()
- tokens = tokens[1:]
- nb_tokens -= 1
- self.entry_obsolete = 1
- else:
- self.entry_obsolete = 0
-
- # Take care of keywords like
- # msgid, msgid_plural, msgctxt & msgstr.
- if tokens[0] in keywords and nb_tokens > 1:
- line = line[len(tokens[0]):].lstrip()
- if re.search(r'([^\\]|^)"', line[1:-1]):
- raise IOError('Syntax error in po file %s (line %s): '
- 'unescaped double quote found' %
- (self.instance.fpath, i))
- self.current_token = line
- self.process(keywords[tokens[0]], i)
- continue
-
- self.current_token = line
-
- if tokens[0] == '#:':
- if nb_tokens <= 1:
- continue
- # we are on a occurrences line
- self.process('OC', i)
-
- elif line[:1] == '"':
- # we are on a continuation line
- if re.search(r'([^\\]|^)"', line[1:-1]):
- raise IOError('Syntax error in po file %s (line %s): '
- 'unescaped double quote found' %
- (self.instance.fpath, i))
- self.process('MC', i)
-
- elif line[:7] == 'msgstr[':
- # we are on a msgstr plural
- self.process('MX', i)
-
- elif tokens[0] == '#,':
- if nb_tokens <= 1:
- continue
- # we are on a flags line
- self.process('FL', i)
-
- elif tokens[0] == '#' or tokens[0].startswith('##'):
- if line == '#':
- line += ' '
- # we are on a translator comment line
- self.process('TC', i)
-
- elif tokens[0] == '#.':
- if nb_tokens <= 1:
- continue
- # we are on a generated comment line
- self.process('GC', i)
-
- elif tokens[0] == '#|':
- if nb_tokens <= 1:
- raise IOError('Syntax error in po file %s (line %s)' %
- (self.instance.fpath, i))
-
- # Remove the marker and any whitespace right after that.
- line = line[2:].lstrip()
- self.current_token = line
-
- if tokens[1].startswith('"'):
- # Continuation of previous metadata.
- self.process('MC', i)
- continue
-
- if nb_tokens == 2:
- # Invalid continuation line.
- raise IOError('Syntax error in po file %s (line %s): '
- 'invalid continuation line' %
- (self.instance.fpath, i))
-
- # we are on a "previous translation" comment line,
- if tokens[1] not in prev_keywords:
- # Unknown keyword in previous translation comment.
- raise IOError('Syntax error in po file %s (line %s): '
- 'unknown keyword %s' %
- (self.instance.fpath, i, tokens[1]))
-
- # Remove the keyword and any whitespace
- # between it and the starting quote.
- line = line[len(tokens[1]):].lstrip()
- self.current_token = line
- self.process(prev_keywords[tokens[1]], i)
-
- else:
- raise IOError('Syntax error in po file %s (line %s)' %
- (self.instance.fpath, i))
-
- if self.current_entry:
- # since entries are added when another entry is found, we must add
- # the last entry here (only if there are lines)
- self.instance.append(self.current_entry)
- # before returning the instance, check if there's metadata and if
- # so extract it in a dict
- metadataentry = self.instance.find('')
- if metadataentry: # metadata found
- # remove the entry
- self.instance.remove(metadataentry)
- self.instance.metadata_is_fuzzy = metadataentry.flags
- key = None
- for msg in metadataentry.msgstr.splitlines():
- try:
- key, val = msg.split(':', 1)
- self.instance.metadata[key] = val.strip()
- except (ValueError, KeyError):
- if key is not None:
- self.instance.metadata[key] += '\n' + msg.strip()
- # close opened file
- if not isinstance(self.fhandle, list): # must be file
- self.fhandle.close()
- return self.instance
-
- def add(self, symbol, states, next_state):
- """
- Add a transition to the state machine.
-
- Keywords arguments:
-
- ``symbol``
- string, the matched token (two chars symbol).
-
- ``states``
- list, a list of states (two chars symbols).
-
- ``next_state``
- the next state the fsm will have after the action.
- """
- for state in states:
- action = getattr(self, 'handle_%s' % next_state.lower())
- self.transitions[(symbol, state)] = (action, next_state)
-
- def process(self, symbol, linenum):
- """
- Process the transition corresponding to the current state and the
- symbol provided.
-
- Keywords arguments:
-
- ``symbol``
- string, the matched token (two chars symbol).
-
- ``linenum``
- integer, the current line number of the parsed file.
- """
- try:
- (action, state) = self.transitions[(symbol, self.current_state)]
- if action():
- self.current_state = state
- except Exception:
- raise IOError('Syntax error in po file (line %s)' % linenum)
-
- # state handlers
-
- def handle_he(self):
- """Handle a header comment."""
- if self.instance.header != '':
- self.instance.header += '\n'
- self.instance.header += self.current_token[2:]
- return 1
-
- def handle_tc(self):
- """Handle a translator comment."""
- if self.current_state in ['MC', 'MS', 'MX']:
- self.instance.append(self.current_entry)
- self.current_entry = POEntry()
- if self.current_entry.tcomment != '':
- self.current_entry.tcomment += '\n'
- tcomment = self.current_token.lstrip('#')
- if tcomment.startswith(' '):
- tcomment = tcomment[1:]
- self.current_entry.tcomment += tcomment
- return True
-
- def handle_gc(self):
- """Handle a generated comment."""
- if self.current_state in ['MC', 'MS', 'MX']:
- self.instance.append(self.current_entry)
- self.current_entry = POEntry()
- if self.current_entry.comment != '':
- self.current_entry.comment += '\n'
- self.current_entry.comment += self.current_token[3:]
- return True
-
- def handle_oc(self):
- """Handle a file:num occurence."""
- if self.current_state in ['MC', 'MS', 'MX']:
- self.instance.append(self.current_entry)
- self.current_entry = POEntry()
- occurrences = self.current_token[3:].split()
- for occurrence in occurrences:
- if occurrence != '':
- try:
- fil, line = occurrence.split(':')
- if not line.isdigit():
- fil = fil + line
- line = ''
- self.current_entry.occurrences.append((fil, line))
- except (ValueError, AttributeError):
- self.current_entry.occurrences.append((occurrence, ''))
- return True
-
- def handle_fl(self):
- """Handle a flags line."""
- if self.current_state in ['MC', 'MS', 'MX']:
- self.instance.append(self.current_entry)
- self.current_entry = POEntry()
- self.current_entry.flags += self.current_token[3:].split(', ')
- return True
-
- def handle_pp(self):
- """Handle a previous msgid_plural line."""
- if self.current_state in ['MC', 'MS', 'MX']:
- self.instance.append(self.current_entry)
- self.current_entry = POEntry()
- self.current_entry.previous_msgid_plural = \
- unescape(self.current_token[1:-1])
- return True
-
- def handle_pm(self):
- """Handle a previous msgid line."""
- if self.current_state in ['MC', 'MS', 'MX']:
- self.instance.append(self.current_entry)
- self.current_entry = POEntry()
- self.current_entry.previous_msgid = \
- unescape(self.current_token[1:-1])
- return True
-
- def handle_pc(self):
- """Handle a previous msgctxt line."""
- if self.current_state in ['MC', 'MS', 'MX']:
- self.instance.append(self.current_entry)
- self.current_entry = POEntry()
- self.current_entry.previous_msgctxt = \
- unescape(self.current_token[1:-1])
- return True
-
- def handle_ct(self):
- """Handle a msgctxt."""
- if self.current_state in ['MC', 'MS', 'MX']:
- self.instance.append(self.current_entry)
- self.current_entry = POEntry()
- self.current_entry.msgctxt = unescape(self.current_token[1:-1])
- return True
-
- def handle_mi(self):
- """Handle a msgid."""
- if self.current_state in ['MC', 'MS', 'MX']:
- self.instance.append(self.current_entry)
- self.current_entry = POEntry()
- self.current_entry.obsolete = self.entry_obsolete
- self.current_entry.msgid = unescape(self.current_token[1:-1])
- return True
-
- def handle_mp(self):
- """Handle a msgid plural."""
- self.current_entry.msgid_plural = unescape(self.current_token[1:-1])
- return True
-
- def handle_ms(self):
- """Handle a msgstr."""
- self.current_entry.msgstr = unescape(self.current_token[1:-1])
- return True
-
- def handle_mx(self):
- """Handle a msgstr plural."""
- index, value = self.current_token[7], self.current_token[11:-1]
- self.current_entry.msgstr_plural[index] = unescape(value)
- self.msgstr_index = index
- return True
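-
- # For example, the token 'msgstr[0] "Files"' carries its plural index at
- # position 7 ('0' here) and its value in the slice [11:-1] ('Files').
- # Note the fixed offsets only cover single-digit plural indexes.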
-
- def handle_mc(self):
- """Handle a msgid or msgstr continuation line."""
- token = unescape(self.current_token[1:-1])
- if self.current_state == 'CT':
- self.current_entry.msgctxt += token
- elif self.current_state == 'MI':
- self.current_entry.msgid += token
- elif self.current_state == 'MP':
- self.current_entry.msgid_plural += token
- elif self.current_state == 'MS':
- self.current_entry.msgstr += token
- elif self.current_state == 'MX':
- self.current_entry.msgstr_plural[self.msgstr_index] += token
- elif self.current_state == 'PP':
- token = token[3:]
- self.current_entry.previous_msgid_plural += token
- elif self.current_state == 'PM':
- token = token[3:]
- self.current_entry.previous_msgid += token
- elif self.current_state == 'PC':
- token = token[3:]
- self.current_entry.previous_msgctxt += token
- # don't change the current state
- return False
-# }}}
-# class _MOFileParser {{{
-
-
-class _MOFileParser(object):
- """
- A class to parse binary mo files.
- """
-
- def __init__(self, mofile, *args, **kwargs):
- """
- Constructor.
-
- Keyword arguments:
-
- ``mofile``
- string, path to the mo file or its content
-
- ``encoding``
- string, the encoding to use, defaults to ``default_encoding``
- global variable (optional).
-
- ``check_for_duplicates``
- whether to check for duplicate entries when adding entries to the
- file (optional, default: ``False``).
- """
- self.fhandle = open(mofile, 'rb')
-
- klass = kwargs.get('klass')
- if klass is None:
- klass = MOFile
- self.instance = klass(
- fpath=mofile,
- encoding=kwargs.get('encoding', default_encoding),
- check_for_duplicates=kwargs.get('check_for_duplicates', False)
- )
-
- def parse(self):
- """
- Build the instance with the file handle provided in the
- constructor.
- """
- # parse magic number
-        magic_number = self._readbinary('<I', 4)
-        if magic_number == MOFile.LITTLE_ENDIAN:
-            ii = '<II'
-        elif magic_number == MOFile.BIG_ENDIAN:
-            ii = '>II'
-        else:
-            raise IOError('Invalid mo file, magic number is incorrect !')
-        self.instance.magic_number = magic_number
-        # parse the version number and the number of strings
-        self.instance.version, numofstrings = self._readbinary(ii, 8)
-        # original strings and translation strings hash table offset
-        msgids_hash_offset, msgstrs_hash_offset = self._readbinary(ii, 8)
-        # move to msgid hash table and read the indexes (length and offset)
-        self.fhandle.seek(msgids_hash_offset)
-        msgids_index = []
-        for i in range(numofstrings):
-            msgids_index.append(self._readbinary(ii, 8))
-        # move to msgstr hash table and read the indexes (length and offset)
-        self.fhandle.seek(msgstrs_hash_offset)
-        msgstrs_index = []
-        for i in range(numofstrings):
-            msgstrs_index.append(self._readbinary(ii, 8))
-        # build entries
-        encoding = self.instance.encoding
-        for i in range(numofstrings):
-            self.fhandle.seek(msgids_index[i][1])
-            msgid = self.fhandle.read(msgids_index[i][0])
-            self.fhandle.seek(msgstrs_index[i][1])
-            msgstr = self.fhandle.read(msgstrs_index[i][0])
-            if i == 0:
-                # the first entry of a mo file carries the metadata block
-                raw_metadata, metadata = msgstr.split(b('\n')), {}
-                for line in raw_metadata:
-                    tokens = line.split(b(':'), 1)
-                    if tokens[0] != b(''):
-                        try:
-                            k = tokens[0].decode(encoding)
-                            v = tokens[1].decode(encoding)
-                            metadata[k] = v.strip()
-                        except IndexError:
-                            metadata[k] = ''
-                self.instance.metadata = metadata
-                continue
-            # test if we have a plural entry
-            msgid_tokens = msgid.split(b('\0'))
-            if len(msgid_tokens) > 1:
-                entry = self._build_entry(
-                    msgid=msgid_tokens[0],
-                    msgid_plural=msgid_tokens[1],
-                    msgstr_plural=dict((k, v) for k, v in
-                                       enumerate(msgstr.split(b('\0'))))
-                )
-            else:
-                entry = self._build_entry(msgid=msgid, msgstr=msgstr)
-            self.instance.append(entry)
-        # close opened file
-        self.fhandle.close()
-        return self.instance
-
- def _build_entry(self, msgid, msgstr=None, msgid_plural=None,
- msgstr_plural=None):
- msgctxt_msgid = msgid.split(b('\x04'))
- encoding = self.instance.encoding
- if len(msgctxt_msgid) > 1:
- kwargs = {
- 'msgctxt': msgctxt_msgid[0].decode(encoding),
- 'msgid': msgctxt_msgid[1].decode(encoding),
- }
- else:
- kwargs = {'msgid': msgid.decode(encoding)}
- if msgstr:
- kwargs['msgstr'] = msgstr.decode(encoding)
- if msgid_plural:
- kwargs['msgid_plural'] = msgid_plural.decode(encoding)
- if msgstr_plural:
- for k in msgstr_plural:
- msgstr_plural[k] = msgstr_plural[k].decode(encoding)
- kwargs['msgstr_plural'] = msgstr_plural
- return MOEntry(**kwargs)
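-
- # In the mo format a context-qualified msgid stores the msgctxt and the
- # msgid joined by an EOT byte, e.g. b'menu\x04Open' splits above into
- # msgctxt 'menu' and msgid 'Open'.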
-
- def _readbinary(self, fmt, numbytes):
- """
- Private method that unpacks ``numbytes`` bytes of data using the
- struct format ``fmt``. It returns a tuple, or a single value if the
- tuple has length 1.
- """
- bytes = self.fhandle.read(numbytes)
- tup = struct.unpack(fmt, bytes)
- if len(tup) == 1:
- return tup[0]
- return tup
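-
- # For example, _readbinary('<I', 4) consumes four bytes and, because
- # struct.unpack() yields a 1-tuple here, returns a single little-endian
- # unsigned integer (this is how the magic number is read above).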
-# }}}
-# class TextWrapper {{{
-
-
-class TextWrapper(textwrap.TextWrapper):
- """
- Subclass of textwrap.TextWrapper that backports the
- drop_whitespace option.
- """
- def __init__(self, *args, **kwargs):
- drop_whitespace = kwargs.pop('drop_whitespace', True)
- textwrap.TextWrapper.__init__(self, *args, **kwargs)
- self.drop_whitespace = drop_whitespace
-
- def _wrap_chunks(self, chunks):
- """_wrap_chunks(chunks : [string]) -> [string]
-
- Wrap a sequence of text chunks and return a list of lines of
- length 'self.width' or less. (If 'break_long_words' is false,
- some lines may be longer than this.) Chunks correspond roughly
- to words and the whitespace between them: each chunk is
- indivisible (modulo 'break_long_words'), but a line break can
- come between any two chunks. Chunks should not have internal
- whitespace; ie. a chunk is either all whitespace or a "word".
- Whitespace chunks will be removed from the beginning and end of
- lines, but apart from that whitespace is preserved.
- """
- lines = []
- if self.width <= 0:
- raise ValueError("invalid width %r (must be > 0)" % self.width)
-
- # Arrange in reverse order so items can be efficiently popped
- # from a stack of chunks.
- chunks.reverse()
-
- while chunks:
-
- # Start the list of chunks that will make up the current line.
- # cur_len is just the length of all the chunks in cur_line.
- cur_line = []
- cur_len = 0
-
- # Figure out which static string will prefix this line.
- if lines:
- indent = self.subsequent_indent
- else:
- indent = self.initial_indent
-
- # Maximum width for this line.
- width = self.width - len(indent)
-
- # First chunk on line is whitespace -- drop it, unless this
- # is the very beginning of the text (ie. no lines started yet).
- if self.drop_whitespace and chunks[-1].strip() == '' and lines:
- del chunks[-1]
-
- while chunks:
- l = len(chunks[-1])
-
- # Can at least squeeze this chunk onto the current line.
- if cur_len + l <= width:
- cur_line.append(chunks.pop())
- cur_len += l
-
- # Nope, this line is full.
- else:
- break
-
- # The current line is full, and the next chunk is too big to
- # fit on *any* line (not just this one).
- if chunks and len(chunks[-1]) > width:
- self._handle_long_word(chunks, cur_line, cur_len, width)
-
- # If the last chunk on this line is all whitespace, drop it.
- if self.drop_whitespace and cur_line and not cur_line[-1].strip():
- del cur_line[-1]
-
- # Convert current line back to a string and store it in list
- # of all lines (return value).
- if cur_line:
- lines.append(indent + ''.join(cur_line))
-
- return lines
-# }}}
-# function wrap() {{{
-
-
-def wrap(text, width=70, **kwargs):
- """
- Wrap a single paragraph of text, returning a list of wrapped lines.
- """
- if sys.version_info < (2, 6):
- return TextWrapper(width=width, **kwargs).wrap(text)
- return textwrap.wrap(text, width=width, **kwargs)
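-
-# Doctest-style sketch:
-# >>> wrap('The quick brown fox', width=10)
-# ['The quick', 'brown fox']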
-
-# }}}
diff --git a/python-packages/pyasn1/__init__.py b/python-packages/pyasn1/__init__.py
deleted file mode 100644
index 12101e7dbc..0000000000
--- a/python-packages/pyasn1/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import sys
-
-# http://www.python.org/dev/peps/pep-0396/
-__version__ = '0.1.4'
-
-if sys.version_info[:2] < (2, 4):
- raise RuntimeError('PyASN1 requires Python 2.4 or later')
-
diff --git a/python-packages/pyasn1/codec/__init__.py b/python-packages/pyasn1/codec/__init__.py
deleted file mode 100644
index 8c3066b2e6..0000000000
--- a/python-packages/pyasn1/codec/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# This file is necessary to make this directory a package.
diff --git a/python-packages/pyasn1/codec/ber/__init__.py b/python-packages/pyasn1/codec/ber/__init__.py
deleted file mode 100644
index 8c3066b2e6..0000000000
--- a/python-packages/pyasn1/codec/ber/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# This file is necessary to make this directory a package.
diff --git a/python-packages/pyasn1/codec/ber/decoder.py b/python-packages/pyasn1/codec/ber/decoder.py
deleted file mode 100644
index f63ae8c1f4..0000000000
--- a/python-packages/pyasn1/codec/ber/decoder.py
+++ /dev/null
@@ -1,773 +0,0 @@
-# BER decoder
-from pyasn1.type import tag, base, univ, char, useful, tagmap
-from pyasn1.codec.ber import eoo
-from pyasn1.compat.octets import oct2int, octs2ints, isOctetsType
-from pyasn1 import debug, error
-
-class AbstractDecoder:
- protoComponent = None
- def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
- length, state, decodeFun, substrateFun):
- raise error.PyAsn1Error('Decoder not implemented for %s' % (tagSet,))
-
- def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
- length, state, decodeFun, substrateFun):
- raise error.PyAsn1Error('Indefinite length mode decoder not implemented for %s' % (tagSet,))
-
-class AbstractSimpleDecoder(AbstractDecoder):
- def _createComponent(self, asn1Spec, tagSet, value=None):
- if asn1Spec is None:
- return self.protoComponent.clone(value, tagSet)
- elif value is None:
- return asn1Spec
- else:
- return asn1Spec.clone(value)
-
-class AbstractConstructedDecoder(AbstractDecoder):
- def _createComponent(self, asn1Spec, tagSet, value=None):
- if asn1Spec is None:
- return self.protoComponent.clone(tagSet)
- else:
- return asn1Spec.clone()
-
-class EndOfOctetsDecoder(AbstractSimpleDecoder):
- def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
- length, state, decodeFun, substrateFun):
- return eoo.endOfOctets, substrate[length:]
-
-class ExplicitTagDecoder(AbstractSimpleDecoder):
- protoComponent = univ.Any('')
- def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
- length, state, decodeFun, substrateFun):
- if substrateFun:
- return substrateFun(
- self._createComponent(asn1Spec, tagSet, ''),
- substrate, length
- )
- head, tail = substrate[:length], substrate[length:]
- value, _ = decodeFun(head, asn1Spec, tagSet, length)
- return value, tail
-
- def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
- length, state, decodeFun, substrateFun):
- if substrateFun:
- return substrateFun(
- self._createComponent(asn1Spec, tagSet, ''),
- substrate, length
- )
- value, substrate = decodeFun(substrate, asn1Spec, tagSet, length)
- terminator, substrate = decodeFun(substrate)
- if terminator == eoo.endOfOctets:
- return value, substrate
- else:
- raise error.PyAsn1Error('Missing end-of-octets terminator')
-
-explicitTagDecoder = ExplicitTagDecoder()
-
-class IntegerDecoder(AbstractSimpleDecoder):
- protoComponent = univ.Integer(0)
- precomputedValues = {
- '\x00': 0,
- '\x01': 1,
- '\x02': 2,
- '\x03': 3,
- '\x04': 4,
- '\x05': 5,
- '\x06': 6,
- '\x07': 7,
- '\x08': 8,
- '\x09': 9,
- '\xff': -1,
- '\xfe': -2,
- '\xfd': -3,
- '\xfc': -4,
- '\xfb': -5
- }
-
- def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
- state, decodeFun, substrateFun):
- head, tail = substrate[:length], substrate[length:]
- if not head:
- raise error.PyAsn1Error('Empty substrate')
- if head in self.precomputedValues:
- value = self.precomputedValues[head]
- else:
- firstOctet = oct2int(head[0])
- if firstOctet & 0x80:
- value = -1
- else:
- value = 0
- for octet in head:
- value = value << 8 | oct2int(octet)
- return self._createComponent(asn1Spec, tagSet, value), tail
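-
- # Decoding sketch: the two's complement payload b'\x01\x00' runs through
- # the loop above as (0 << 8) | 0x01 and then (1 << 8) | 0x00, giving 256.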
-
-class BooleanDecoder(IntegerDecoder):
- protoComponent = univ.Boolean(0)
- def _createComponent(self, asn1Spec, tagSet, value=None):
- return IntegerDecoder._createComponent(self, asn1Spec, tagSet, value and 1 or 0)
-
-class BitStringDecoder(AbstractSimpleDecoder):
- protoComponent = univ.BitString(())
- def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
- state, decodeFun, substrateFun):
- head, tail = substrate[:length], substrate[length:]
- if tagSet[0][1] == tag.tagFormatSimple: # XXX what tag to check?
- if not head:
- raise error.PyAsn1Error('Empty substrate')
- trailingBits = oct2int(head[0])
- if trailingBits > 7:
- raise error.PyAsn1Error(
- 'Trailing bits overflow %s' % trailingBits
- )
- head = head[1:]
- lsb = p = 0; l = len(head)-1; b = ()
- while p <= l:
- if p == l:
- lsb = trailingBits
- j = 7
- o = oct2int(head[p])
- while j >= lsb:
- b = b + ((o>>j)&0x01,)
- j = j - 1
- p = p + 1
- return self._createComponent(asn1Spec, tagSet, b), tail
- r = self._createComponent(asn1Spec, tagSet, ())
- if substrateFun:
- return substrateFun(r, substrate, length)
- while head:
- component, head = decodeFun(head)
- r = r + component
- return r, tail
-
- def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
- length, state, decodeFun, substrateFun):
- r = self._createComponent(asn1Spec, tagSet, '')
- if substrateFun:
- return substrateFun(r, substrate, length)
- while substrate:
- component, substrate = decodeFun(substrate)
- if component == eoo.endOfOctets:
- break
- r = r + component
- else:
- raise error.SubstrateUnderrunError(
- 'No EOO seen before substrate ends'
- )
- return r, substrate
-
-class OctetStringDecoder(AbstractSimpleDecoder):
- protoComponent = univ.OctetString('')
- def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
- state, decodeFun, substrateFun):
- head, tail = substrate[:length], substrate[length:]
- if tagSet[0][1] == tag.tagFormatSimple: # XXX what tag to check?
- return self._createComponent(asn1Spec, tagSet, head), tail
- r = self._createComponent(asn1Spec, tagSet, '')
- if substrateFun:
- return substrateFun(r, substrate, length)
- while head:
- component, head = decodeFun(head)
- r = r + component
- return r, tail
-
- def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
- length, state, decodeFun, substrateFun):
- r = self._createComponent(asn1Spec, tagSet, '')
- if substrateFun:
- return substrateFun(r, substrate, length)
- while substrate:
- component, substrate = decodeFun(substrate)
- if component == eoo.endOfOctets:
- break
- r = r + component
- else:
- raise error.SubstrateUnderrunError(
- 'No EOO seen before substrate ends'
- )
- return r, substrate
-
-class NullDecoder(AbstractSimpleDecoder):
- protoComponent = univ.Null('')
- def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
- length, state, decodeFun, substrateFun):
- head, tail = substrate[:length], substrate[length:]
- r = self._createComponent(asn1Spec, tagSet)
- if head:
- raise error.PyAsn1Error('Unexpected %d-octet substrate for Null' % length)
- return r, tail
-
-class ObjectIdentifierDecoder(AbstractSimpleDecoder):
- protoComponent = univ.ObjectIdentifier(())
- def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
- state, decodeFun, substrateFun):
- head, tail = substrate[:length], substrate[length:]
- if not head:
- raise error.PyAsn1Error('Empty substrate')
-
- # Get the first subid
- subId = oct2int(head[0])
- oid = divmod(subId, 40)
-
- index = 1
- substrateLen = len(head)
- while index < substrateLen:
- subId = oct2int(head[index])
- index = index + 1
- if subId == 128:
- # ASN.1 spec forbids leading zeros (0x80) in sub-ID OID
- # encoding, tolerating it opens a vulnerability.
- # See http://www.cosic.esat.kuleuven.be/publications/article-1432.pdf page 7
- raise error.PyAsn1Error('Invalid leading 0x80 in sub-OID')
- elif subId > 128:
- # Construct subid from a number of octets
- nextSubId = subId
- subId = 0
- while nextSubId >= 128:
- subId = (subId << 7) + (nextSubId & 0x7F)
- if index >= substrateLen:
- raise error.SubstrateUnderrunError(
- 'Short substrate for sub-OID past %s' % (oid,)
- )
- nextSubId = oct2int(head[index])
- index = index + 1
- subId = (subId << 7) + nextSubId
- oid = oid + (subId,)
- return self._createComponent(asn1Spec, tagSet, oid), tail
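-
- # Decoding sketch: the payload b'\x2b\x06\x01' opens with sub-ID 43,
- # divmod(43, 40) == (1, 3), and the remaining octets append 6 and 1,
- # yielding the OID (1, 3, 6, 1).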
-
-class RealDecoder(AbstractSimpleDecoder):
- protoComponent = univ.Real()
- def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
- length, state, decodeFun, substrateFun):
- head, tail = substrate[:length], substrate[length:]
- if not head:
- raise error.SubstrateUnderrunError('Short substrate for Real')
- fo = oct2int(head[0]); head = head[1:]
- if fo & 0x40: # infinite value
- value = fo & 0x01 and '-inf' or 'inf'
- elif fo & 0x80: # binary encoding
- n = (fo & 0x03) + 1
- if n == 4:
- n = oct2int(head[0])
- eo, head = head[:n], head[n:]
- if not eo or not head:
- raise error.PyAsn1Error('Real exponent screwed')
- e = oct2int(eo[0]) & 0x80 and -1 or 0
- while eo: # exponent
- e <<= 8
- e |= oct2int(eo[0])
- eo = eo[1:]
- p = 0
- while head: # value
- p <<= 8
- p |= oct2int(head[0])
- head = head[1:]
- if fo & 0x40: # sign bit
- p = -p
- value = (p, 2, e)
- elif fo & 0xc0 == 0: # character encoding
- try:
- if fo & 0x3 == 0x1: # NR1
- value = (int(head), 10, 0)
- elif fo & 0x3 == 0x2: # NR2
- value = float(head)
- elif fo & 0x3 == 0x3: # NR3
- value = float(head)
- else:
- raise error.SubstrateUnderrunError(
- 'Unknown NR (tag %s)' % fo
- )
- except ValueError:
- raise error.SubstrateUnderrunError(
- 'Bad character Real syntax'
- )
- elif fo & 0xc0 == 0x40: # special real value
- pass
- else:
- raise error.SubstrateUnderrunError(
- 'Unknown encoding (tag %s)' % fo
- )
- return self._createComponent(asn1Spec, tagSet, value), tail
-
-class SequenceDecoder(AbstractConstructedDecoder):
- protoComponent = univ.Sequence()
- def _getComponentTagMap(self, r, idx):
- try:
- return r.getComponentTagMapNearPosition(idx)
- except error.PyAsn1Error:
- return
-
- def _getComponentPositionByType(self, r, t, idx):
- return r.getComponentPositionNearType(t, idx)
-
- def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
- length, state, decodeFun, substrateFun):
- head, tail = substrate[:length], substrate[length:]
- r = self._createComponent(asn1Spec, tagSet)
- idx = 0
- if substrateFun:
- return substrateFun(r, substrate, length)
- while head:
- asn1Spec = self._getComponentTagMap(r, idx)
- component, head = decodeFun(head, asn1Spec)
- idx = self._getComponentPositionByType(
- r, component.getEffectiveTagSet(), idx
- )
- r.setComponentByPosition(idx, component, asn1Spec is None)
- idx = idx + 1
- r.setDefaultComponents()
- r.verifySizeSpec()
- return r, tail
-
- def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
- length, state, decodeFun, substrateFun):
- r = self._createComponent(asn1Spec, tagSet)
- if substrateFun:
- return substrateFun(r, substrate, length)
- idx = 0
- while substrate:
- asn1Spec = self._getComponentTagMap(r, idx)
- component, substrate = decodeFun(substrate, asn1Spec)
- if component == eoo.endOfOctets:
- break
- idx = self._getComponentPositionByType(
- r, component.getEffectiveTagSet(), idx
- )
- r.setComponentByPosition(idx, component, asn1Spec is None)
- idx = idx + 1
- else:
- raise error.SubstrateUnderrunError(
- 'No EOO seen before substrate ends'
- )
- r.setDefaultComponents()
- r.verifySizeSpec()
- return r, substrate
-
-class SequenceOfDecoder(AbstractConstructedDecoder):
- protoComponent = univ.SequenceOf()
- def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
- length, state, decodeFun, substrateFun):
- head, tail = substrate[:length], substrate[length:]
- r = self._createComponent(asn1Spec, tagSet)
- if substrateFun:
- return substrateFun(r, substrate, length)
- asn1Spec = r.getComponentType()
- idx = 0
- while head:
- component, head = decodeFun(head, asn1Spec)
- r.setComponentByPosition(idx, component, asn1Spec is None)
- idx = idx + 1
- r.verifySizeSpec()
- return r, tail
-
- def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
- length, state, decodeFun, substrateFun):
- r = self._createComponent(asn1Spec, tagSet)
- if substrateFun:
- return substrateFun(r, substrate, length)
- asn1Spec = r.getComponentType()
- idx = 0
- while substrate:
- component, substrate = decodeFun(substrate, asn1Spec)
- if component == eoo.endOfOctets:
- break
- r.setComponentByPosition(idx, component, asn1Spec is None)
- idx = idx + 1
- else:
- raise error.SubstrateUnderrunError(
- 'No EOO seen before substrate ends'
- )
- r.verifySizeSpec()
- return r, substrate
-
-class SetDecoder(SequenceDecoder):
- protoComponent = univ.Set()
- def _getComponentTagMap(self, r, idx):
- return r.getComponentTagMap()
-
- def _getComponentPositionByType(self, r, t, idx):
- nextIdx = r.getComponentPositionByType(t)
- if nextIdx is None:
- return idx
- else:
- return nextIdx
-
-class SetOfDecoder(SequenceOfDecoder):
- protoComponent = univ.SetOf()
-
-class ChoiceDecoder(AbstractConstructedDecoder):
- protoComponent = univ.Choice()
- def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
- length, state, decodeFun, substrateFun):
- head, tail = substrate[:length], substrate[length:]
- r = self._createComponent(asn1Spec, tagSet)
- if substrateFun:
- return substrateFun(r, substrate, length)
- if r.getTagSet() == tagSet: # explicitly tagged Choice
- component, head = decodeFun(
- head, r.getComponentTagMap()
- )
- else:
- component, head = decodeFun(
- head, r.getComponentTagMap(), tagSet, length, state
- )
- if isinstance(component, univ.Choice):
- effectiveTagSet = component.getEffectiveTagSet()
- else:
- effectiveTagSet = component.getTagSet()
- r.setComponentByType(effectiveTagSet, component, 0, asn1Spec is None)
- return r, tail
-
- indefLenValueDecoder = valueDecoder
-
-class AnyDecoder(AbstractSimpleDecoder):
- protoComponent = univ.Any()
- def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
- length, state, decodeFun, substrateFun):
- if asn1Spec is None or \
- asn1Spec is not None and tagSet != asn1Spec.getTagSet():
- # untagged Any container, recover inner header substrate
- length = length + len(fullSubstrate) - len(substrate)
- substrate = fullSubstrate
- if substrateFun:
- return substrateFun(self._createComponent(asn1Spec, tagSet),
- substrate, length)
- head, tail = substrate[:length], substrate[length:]
- return self._createComponent(asn1Spec, tagSet, value=head), tail
-
- def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
- length, state, decodeFun, substrateFun):
- if asn1Spec is not None and tagSet == asn1Spec.getTagSet():
- # tagged Any type -- consume header substrate
- header = ''
- else:
- # untagged Any, recover header substrate
- header = fullSubstrate[:-len(substrate)]
-
- r = self._createComponent(asn1Spec, tagSet, header)
-
- # Any components do not inherit initial tag
- asn1Spec = self.protoComponent
-
- if substrateFun:
- return substrateFun(r, substrate, length)
- while substrate:
- component, substrate = decodeFun(substrate, asn1Spec)
- if component == eoo.endOfOctets:
- break
- r = r + component
- else:
- raise error.SubstrateUnderrunError(
- 'No EOO seen before substrate ends'
- )
- return r, substrate
-
-# character string types
-class UTF8StringDecoder(OctetStringDecoder):
- protoComponent = char.UTF8String()
-class NumericStringDecoder(OctetStringDecoder):
- protoComponent = char.NumericString()
-class PrintableStringDecoder(OctetStringDecoder):
- protoComponent = char.PrintableString()
-class TeletexStringDecoder(OctetStringDecoder):
- protoComponent = char.TeletexString()
-class VideotexStringDecoder(OctetStringDecoder):
- protoComponent = char.VideotexString()
-class IA5StringDecoder(OctetStringDecoder):
- protoComponent = char.IA5String()
-class GraphicStringDecoder(OctetStringDecoder):
- protoComponent = char.GraphicString()
-class VisibleStringDecoder(OctetStringDecoder):
- protoComponent = char.VisibleString()
-class GeneralStringDecoder(OctetStringDecoder):
- protoComponent = char.GeneralString()
-class UniversalStringDecoder(OctetStringDecoder):
- protoComponent = char.UniversalString()
-class BMPStringDecoder(OctetStringDecoder):
- protoComponent = char.BMPString()
-
-# "useful" types
-class GeneralizedTimeDecoder(OctetStringDecoder):
- protoComponent = useful.GeneralizedTime()
-class UTCTimeDecoder(OctetStringDecoder):
- protoComponent = useful.UTCTime()
-
-tagMap = {
- eoo.endOfOctets.tagSet: EndOfOctetsDecoder(),
- univ.Integer.tagSet: IntegerDecoder(),
- univ.Boolean.tagSet: BooleanDecoder(),
- univ.BitString.tagSet: BitStringDecoder(),
- univ.OctetString.tagSet: OctetStringDecoder(),
- univ.Null.tagSet: NullDecoder(),
- univ.ObjectIdentifier.tagSet: ObjectIdentifierDecoder(),
- univ.Enumerated.tagSet: IntegerDecoder(),
- univ.Real.tagSet: RealDecoder(),
- univ.Sequence.tagSet: SequenceDecoder(), # conflicts with SequenceOf
- univ.Set.tagSet: SetDecoder(), # conflicts with SetOf
- univ.Choice.tagSet: ChoiceDecoder(), # conflicts with Any
- # character string types
- char.UTF8String.tagSet: UTF8StringDecoder(),
- char.NumericString.tagSet: NumericStringDecoder(),
- char.PrintableString.tagSet: PrintableStringDecoder(),
- char.TeletexString.tagSet: TeletexStringDecoder(),
- char.VideotexString.tagSet: VideotexStringDecoder(),
- char.IA5String.tagSet: IA5StringDecoder(),
- char.GraphicString.tagSet: GraphicStringDecoder(),
- char.VisibleString.tagSet: VisibleStringDecoder(),
- char.GeneralString.tagSet: GeneralStringDecoder(),
- char.UniversalString.tagSet: UniversalStringDecoder(),
- char.BMPString.tagSet: BMPStringDecoder(),
- # useful types
- useful.GeneralizedTime.tagSet: GeneralizedTimeDecoder(),
- useful.UTCTime.tagSet: UTCTimeDecoder()
- }
-
-# Type-to-codec map for ambiguous ASN.1 types
-typeMap = {
- univ.Set.typeId: SetDecoder(),
- univ.SetOf.typeId: SetOfDecoder(),
- univ.Sequence.typeId: SequenceDecoder(),
- univ.SequenceOf.typeId: SequenceOfDecoder(),
- univ.Choice.typeId: ChoiceDecoder(),
- univ.Any.typeId: AnyDecoder()
- }
-
-( stDecodeTag, stDecodeLength, stGetValueDecoder, stGetValueDecoderByAsn1Spec,
- stGetValueDecoderByTag, stTryAsExplicitTag, stDecodeValue,
- stDumpRawValue, stErrorCondition, stStop ) = [x for x in range(10)]
-
-class Decoder:
- defaultErrorState = stErrorCondition
-# defaultErrorState = stDumpRawValue
- defaultRawDecoder = AnyDecoder()
- def __init__(self, tagMap, typeMap={}):
- self.__tagMap = tagMap
- self.__typeMap = typeMap
- self.__endOfOctetsTagSet = eoo.endOfOctets.getTagSet()
- # Tag & TagSet objects caches
- self.__tagCache = {}
- self.__tagSetCache = {}
-
- def __call__(self, substrate, asn1Spec=None, tagSet=None,
- length=None, state=stDecodeTag, recursiveFlag=1,
- substrateFun=None):
- if debug.logger & debug.flagDecoder:
- debug.logger('decoder called at scope %s with state %d, working with up to %d octets of substrate: %s' % (debug.scope, state, len(substrate), debug.hexdump(substrate)))
- fullSubstrate = substrate
- while state != stStop:
- if state == stDecodeTag:
- # Decode tag
- if not substrate:
- raise error.SubstrateUnderrunError(
- 'Short octet stream on tag decoding'
- )
- if not isOctetsType(substrate) and \
- not isinstance(substrate, univ.OctetString):
- raise error.PyAsn1Error('Bad octet stream type')
-
- firstOctet = substrate[0]
- substrate = substrate[1:]
- if firstOctet in self.__tagCache:
- lastTag = self.__tagCache[firstOctet]
- else:
- t = oct2int(firstOctet)
- tagClass = t&0xC0
- tagFormat = t&0x20
- tagId = t&0x1F
- if tagId == 0x1F:
- tagId = 0
- while 1:
- if not substrate:
- raise error.SubstrateUnderrunError(
- 'Short octet stream on long tag decoding'
- )
- t = oct2int(substrate[0])
- tagId = tagId << 7 | (t&0x7F)
- substrate = substrate[1:]
- if not t&0x80:
- break
- lastTag = tag.Tag(
- tagClass=tagClass, tagFormat=tagFormat, tagId=tagId
- )
- if tagId < 31:
- # cache short tags
- self.__tagCache[firstOctet] = lastTag
- if tagSet is None:
- if firstOctet in self.__tagSetCache:
- tagSet = self.__tagSetCache[firstOctet]
- else:
- # base tag not recovered
- tagSet = tag.TagSet((), lastTag)
- if firstOctet in self.__tagCache:
- self.__tagSetCache[firstOctet] = tagSet
- else:
- tagSet = lastTag + tagSet
- state = stDecodeLength
- debug.logger and debug.logger & debug.flagDecoder and debug.logger('tag decoded into %r, decoding length' % tagSet)
- if state == stDecodeLength:
- # Decode length
- if not substrate:
- raise error.SubstrateUnderrunError(
- 'Short octet stream on length decoding'
- )
- firstOctet = oct2int(substrate[0])
- if firstOctet == 128:
- size = 1
- length = -1
- elif firstOctet < 128:
- length, size = firstOctet, 1
- else:
- size = firstOctet & 0x7F
- # encoded in size bytes
- length = 0
- lengthString = substrate[1:size+1]
- # missing check on maximum size, which shouldn't be a
- # problem, we can handle more than is possible
- if len(lengthString) != size:
- raise error.SubstrateUnderrunError(
- '%s<%s at %s' %
- (size, len(lengthString), tagSet)
- )
- for char in lengthString:
- length = (length << 8) | oct2int(char)
- size = size + 1
- substrate = substrate[size:]
- if length != -1 and len(substrate) < length:
- raise error.SubstrateUnderrunError(
- '%d-octet short' % (length - len(substrate))
- )
- state = stGetValueDecoder
- debug.logger and debug.logger & debug.flagDecoder and debug.logger('value length decoded into %d, payload substrate is: %s' % (length, debug.hexdump(substrate[:length])))
- if state == stGetValueDecoder:
- if asn1Spec is None:
- state = stGetValueDecoderByTag
- else:
- state = stGetValueDecoderByAsn1Spec
- #
- # There are two ways of creating subtypes in ASN.1, and they influence
- # decoder operation. These methods are:
- # 1) Either base types are used, or no IMPLICIT tagging has been
- # applied on subtyping.
- # 2) Subtype syntax drops base type information (by means of
- # IMPLICIT tagging).
- # The first case allows for complete tag recovery from substrate
- # while the second one requires original ASN.1 type spec for
- # decoding.
- #
- # In either case a set of tags (tagSet) is coming from substrate
- # in an incremental, tag-by-tag fashion (this is the case of
- # EXPLICIT tag which is most basic). Outermost tag comes first
- # from the wire.
- #
- if state == stGetValueDecoderByTag:
- if tagSet in self.__tagMap:
- concreteDecoder = self.__tagMap[tagSet]
- else:
- concreteDecoder = None
- if concreteDecoder:
- state = stDecodeValue
- else:
- _k = tagSet[:1]
- if _k in self.__tagMap:
- concreteDecoder = self.__tagMap[_k]
- else:
- concreteDecoder = None
- if concreteDecoder:
- state = stDecodeValue
- else:
- state = stTryAsExplicitTag
- if debug.logger and debug.logger & debug.flagDecoder:
- debug.logger('codec %s chosen by a built-in type, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "", state == stDecodeValue and 'value' or 'as explicit tag'))
- debug.scope.push(concreteDecoder is None and '?' or concreteDecoder.protoComponent.__class__.__name__)
- if state == stGetValueDecoderByAsn1Spec:
- if isinstance(asn1Spec, (dict, tagmap.TagMap)):
- if tagSet in asn1Spec:
- __chosenSpec = asn1Spec[tagSet]
- else:
- __chosenSpec = None
- if debug.logger and debug.logger & debug.flagDecoder:
- debug.logger('candidate ASN.1 spec is a map of:')
- for t, v in asn1Spec.getPosMap().items():
- debug.logger(' %r -> %s' % (t, v.__class__.__name__))
- if asn1Spec.getNegMap():
- debug.logger('but neither of: ')
- for i in asn1Spec.getNegMap().items():
- debug.logger(' %r -> %s' % (t, v.__class__.__name__))
- debug.logger('new candidate ASN.1 spec is %s, chosen by %r' % (__chosenSpec is None and '' or __chosenSpec.__class__.__name__, tagSet))
- else:
- __chosenSpec = asn1Spec
- debug.logger and debug.logger & debug.flagDecoder and debug.logger('candidate ASN.1 spec is %s' % asn1Spec.__class__.__name__)
- if __chosenSpec is not None and (
- tagSet == __chosenSpec.getTagSet() or \
- tagSet in __chosenSpec.getTagMap()
- ):
- # use base type for codec lookup to recover untagged types
- baseTagSet = __chosenSpec.baseTagSet
- if __chosenSpec.typeId is not None and \
- __chosenSpec.typeId in self.__typeMap:
- # ambiguous type
- concreteDecoder = self.__typeMap[__chosenSpec.typeId]
- debug.logger and debug.logger & debug.flagDecoder and debug.logger('value decoder chosen for an ambiguous type by type ID %s' % (__chosenSpec.typeId,))
- elif baseTagSet in self.__tagMap:
- # base type or tagged subtype
- concreteDecoder = self.__tagMap[baseTagSet]
- debug.logger and debug.logger & debug.flagDecoder and debug.logger('value decoder chosen by base %r' % (baseTagSet,))
- else:
- concreteDecoder = None
- if concreteDecoder:
- asn1Spec = __chosenSpec
- state = stDecodeValue
- else:
- state = stTryAsExplicitTag
- elif tagSet == self.__endOfOctetsTagSet:
- concreteDecoder = self.__tagMap[tagSet]
- state = stDecodeValue
- debug.logger and debug.logger & debug.flagDecoder and debug.logger('end-of-octets found')
- else:
- concreteDecoder = None
- state = stTryAsExplicitTag
- if debug.logger and debug.logger & debug.flagDecoder:
- debug.logger('codec %s chosen by ASN.1 spec, decoding %s' % (state == stDecodeValue and concreteDecoder.__class__.__name__ or "", state == stDecodeValue and 'value' or 'as explicit tag'))
- debug.scope.push(__chosenSpec is None and '?' or __chosenSpec.__class__.__name__)
- if state == stTryAsExplicitTag:
- if tagSet and \
- tagSet[0][1] == tag.tagFormatConstructed and \
- tagSet[0][0] != tag.tagClassUniversal:
- # Assume explicit tagging
- concreteDecoder = explicitTagDecoder
- state = stDecodeValue
- else:
- concreteDecoder = None
- state = self.defaultErrorState
- debug.logger and debug.logger & debug.flagDecoder and debug.logger('codec %s chosen, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "", state == stDecodeValue and 'value' or 'as failure'))
- if state == stDumpRawValue:
- concreteDecoder = self.defaultRawDecoder
- debug.logger and debug.logger & debug.flagDecoder and debug.logger('codec %s chosen, decoding value' % concreteDecoder.__class__.__name__)
- state = stDecodeValue
- if state == stDecodeValue:
- if recursiveFlag == 0 and not substrateFun: # legacy
- substrateFun = lambda a,b,c: (a,b[:c])
- if length == -1: # indef length
- value, substrate = concreteDecoder.indefLenValueDecoder(
- fullSubstrate, substrate, asn1Spec, tagSet, length,
- stGetValueDecoder, self, substrateFun
- )
- else:
- value, substrate = concreteDecoder.valueDecoder(
- fullSubstrate, substrate, asn1Spec, tagSet, length,
- stGetValueDecoder, self, substrateFun
- )
- state = stStop
- debug.logger and debug.logger & debug.flagDecoder and debug.logger('codec %s yields type %s, value:\n%s\n...remaining substrate is: %s' % (concreteDecoder.__class__.__name__, value.__class__.__name__, value.prettyPrint(), substrate and debug.hexdump(substrate) or ''))
- if state == stErrorCondition:
- raise error.PyAsn1Error(
- '%r not in asn1Spec: %r' % (tagSet, asn1Spec)
- )
- if debug.logger and debug.logger & debug.flagDecoder:
- debug.scope.pop()
- debug.logger('decoder left scope %s, call completed' % debug.scope)
- return value, substrate
-
-decode = Decoder(tagMap, typeMap)
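-
-# Minimal usage sketch: on Python 3, decode(b'\x02\x01\x0c') returns the
-# pair (Integer(12), b'') -- the decoded object plus any unconsumed
-# substrate.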
-
-# XXX
-# non-recursive decoding; return position rather than substrate
diff --git a/python-packages/pyasn1/codec/ber/encoder.py b/python-packages/pyasn1/codec/ber/encoder.py
deleted file mode 100644
index 181fbdebdf..0000000000
--- a/python-packages/pyasn1/codec/ber/encoder.py
+++ /dev/null
@@ -1,337 +0,0 @@
-# BER encoder
-from pyasn1.type import base, tag, univ, char, useful
-from pyasn1.codec.ber import eoo
-from pyasn1.compat.octets import int2oct, oct2int, ints2octs, null, str2octs
-from pyasn1 import debug, error
-
-class Error(Exception): pass
-
-class AbstractItemEncoder:
- supportIndefLenMode = 1
- def encodeTag(self, t, isConstructed):
- tagClass, tagFormat, tagId = t.asTuple() # this is a hotspot
- v = tagClass | tagFormat
- if isConstructed:
- v = v|tag.tagFormatConstructed
- if tagId < 31:
- return int2oct(v|tagId)
- else:
- s = int2oct(tagId&0x7f)
- tagId = tagId >> 7
- while tagId:
- s = int2oct(0x80|(tagId&0x7f)) + s
- tagId = tagId >> 7
- return int2oct(v|0x1F) + s
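-
- # Tag sketch: a universal primitive INTEGER (class 0x00, format 0x00,
- # id 2) encodes to the single identifier octet b'\x02'.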
-
- def encodeLength(self, length, defMode):
- if not defMode and self.supportIndefLenMode:
- return int2oct(0x80)
- if length < 0x80:
- return int2oct(length)
- else:
- substrate = null
- while length:
- substrate = int2oct(length&0xff) + substrate
- length = length >> 8
- substrateLen = len(substrate)
- if substrateLen > 126:
- raise Error('Length octets overflow (%d)' % substrateLen)
- return int2oct(0x80 | substrateLen) + substrate
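-
- # Length sketch: in definite mode 5 encodes as b'\x05' (short form),
- # while 300 encodes as b'\x82\x01\x2c' (0x82 announces two length
- # octets).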
-
- def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
- raise Error('Not implemented')
-
- def _encodeEndOfOctets(self, encodeFun, defMode):
- if defMode or not self.supportIndefLenMode:
- return null
- else:
- return encodeFun(eoo.endOfOctets, defMode)
-
- def encode(self, encodeFun, value, defMode, maxChunkSize):
- substrate, isConstructed = self.encodeValue(
- encodeFun, value, defMode, maxChunkSize
- )
- tagSet = value.getTagSet()
- if tagSet:
- if not isConstructed: # primitive form implies definite mode
- defMode = 1
- return self.encodeTag(
- tagSet[-1], isConstructed
- ) + self.encodeLength(
- len(substrate), defMode
- ) + substrate + self._encodeEndOfOctets(encodeFun, defMode)
- else:
- return substrate # untagged value
-
-class EndOfOctetsEncoder(AbstractItemEncoder):
- def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
- return null, 0
-
-class ExplicitlyTaggedItemEncoder(AbstractItemEncoder):
- def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
- if isinstance(value, base.AbstractConstructedAsn1Item):
- value = value.clone(tagSet=value.getTagSet()[:-1],
- cloneValueFlag=1)
- else:
- value = value.clone(tagSet=value.getTagSet()[:-1])
- return encodeFun(value, defMode, maxChunkSize), 1
-
-explicitlyTaggedItemEncoder = ExplicitlyTaggedItemEncoder()
-
-class IntegerEncoder(AbstractItemEncoder):
- supportIndefLenMode = 0
- def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
- octets = []
- value = int(value) # to save on ops on asn1 type
- while 1:
- octets.insert(0, value & 0xff)
- if value == 0 or value == -1:
- break
- value = value >> 8
- if value == 0 and octets[0] & 0x80:
- octets.insert(0, 0)
- while len(octets) > 1 and \
- (octets[0] == 0 and octets[1] & 0x80 == 0 or \
- octets[0] == 0xff and octets[1] & 0x80 != 0):
- del octets[0]
- return ints2octs(octets), 0
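-
- # Encoding sketch: 256 first accumulates [0x00, 0x01, 0x00]; the
- # trimming loop then drops the redundant leading zero, leaving
- # b'\x01\x00'.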
-
-class BitStringEncoder(AbstractItemEncoder):
- def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
- if not maxChunkSize or len(value) <= maxChunkSize*8:
- r = {}; l = len(value); p = 0; j = 7
- while p < l:
- i, j = divmod(p, 8)
- r[i] = r.get(i,0) | value[p]<<(7-j)
- p = p + 1
- keys = list(r); keys.sort()
- return int2oct(7-j) + ints2octs([r[k] for k in keys]), 0
- else:
- pos = 0; substrate = null
- while 1:
- # count in octets
- v = value.clone(value[pos*8:pos*8+maxChunkSize*8])
- if not v:
- break
- substrate = substrate + encodeFun(v, defMode, maxChunkSize)
- pos = pos + maxChunkSize
- return substrate, 1
-
-class OctetStringEncoder(AbstractItemEncoder):
- def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
- if not maxChunkSize or len(value) <= maxChunkSize:
- return value.asOctets(), 0
- else:
- pos = 0; substrate = null
- while 1:
- v = value.clone(value[pos:pos+maxChunkSize])
- if not v:
- break
- substrate = substrate + encodeFun(v, defMode, maxChunkSize)
- pos = pos + maxChunkSize
- return substrate, 1
-
-class NullEncoder(AbstractItemEncoder):
- supportIndefLenMode = 0
- def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
- return null, 0
-
-class ObjectIdentifierEncoder(AbstractItemEncoder):
- supportIndefLenMode = 0
- precomputedValues = {
- (1, 3, 6, 1, 2): (43, 6, 1, 2),
- (1, 3, 6, 1, 4): (43, 6, 1, 4)
- }
- def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
- oid = value.asTuple()
- if oid[:5] in self.precomputedValues:
- octets = self.precomputedValues[oid[:5]]
- index = 5
- else:
- if len(oid) < 2:
- raise error.PyAsn1Error('Short OID %s' % (value,))
-
- # Build the first two sub-IDs
- if oid[0] > 6 or oid[1] > 39 or oid[0] == 6 and oid[1] > 15:
- raise error.PyAsn1Error(
- 'Initial sub-ID overflow %s in OID %s' % (oid[:2], value)
- )
- octets = (oid[0] * 40 + oid[1],)
- index = 2
-
- # Cycle through subids
- for subid in oid[index:]:
- if subid > -1 and subid < 128:
- # Optimize for the common case
- octets = octets + (subid & 0x7f,)
- elif subid < 0 or subid > 0xFFFFFFFF:
- raise error.PyAsn1Error(
- 'SubId overflow %s in %s' % (subid, value)
- )
- else:
- # Pack large Sub-Object IDs
- res = (subid & 0x7f,)
- subid = subid >> 7
- while subid > 0:
- res = (0x80 | (subid & 0x7f),) + res
- subid = subid >> 7
- # Add packed Sub-Object ID to resulted Object ID
- octets += res
-
- return ints2octs(octets), 0
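-
- # Encoding sketch: OID 1.2.840 packs its first two arcs as 1*40 + 2 ==
- # 42, then 840 spills into the base-128 octets 0x86 0x48, giving
- # b'\x2a\x86\x48'.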
-
-class RealEncoder(AbstractItemEncoder):
- def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
- if value.isPlusInfinity():
- return int2oct(0x40), 0
- if value.isMinusInfinity():
- return int2oct(0x41), 0
- m, b, e = value
- if not m:
- return null, 0
- if b == 10:
- return str2octs('\x03%dE%s%d' % (m, e == 0 and '+' or '', e)), 0
- elif b == 2:
- fo = 0x80 # binary encoding
- if m < 0:
- fo = fo | 0x40 # sign bit
- m = -m
- while int(m) != m: # drop floating point
- m *= 2
- e -= 1
- while m & 0x1 == 0: # mantissa normalization
- m >>= 1
- e += 1
- eo = null
- while e not in (0, -1):
- eo = int2oct(e&0xff) + eo
- e >>= 8
- if e == 0 and eo and oct2int(eo[0]) & 0x80:
- eo = int2oct(0) + eo
- n = len(eo)
- if n > 0xff:
- raise error.PyAsn1Error('Real exponent overflow')
- if n == 1:
- pass
- elif n == 2:
- fo |= 1
- elif n == 3:
- fo |= 2
- else:
- fo |= 3
- eo = int2oct(n//0xff+1) + eo
- po = null
- while m:
- po = int2oct(m&0xff) + po
- m >>= 8
- substrate = int2oct(fo) + eo + po
- return substrate, 0
- else:
- raise error.PyAsn1Error('Prohibited Real base %s' % b)
-
-class SequenceEncoder(AbstractItemEncoder):
- def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
- value.setDefaultComponents()
- value.verifySizeSpec()
- substrate = null; idx = len(value)
- while idx > 0:
- idx = idx - 1
- if value[idx] is None: # Optional component
- continue
- component = value.getDefaultComponentByPosition(idx)
- if component is not None and component == value[idx]:
- continue
- substrate = encodeFun(
- value[idx], defMode, maxChunkSize
- ) + substrate
- return substrate, 1
-
-class SequenceOfEncoder(AbstractItemEncoder):
- def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
- value.verifySizeSpec()
- substrate = null; idx = len(value)
- while idx > 0:
- idx = idx - 1
- substrate = encodeFun(
- value[idx], defMode, maxChunkSize
- ) + substrate
- return substrate, 1
-
-class ChoiceEncoder(AbstractItemEncoder):
- def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
- return encodeFun(value.getComponent(), defMode, maxChunkSize), 1
-
-class AnyEncoder(OctetStringEncoder):
- def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
- return value.asOctets(), defMode == 0
-
-tagMap = {
- eoo.endOfOctets.tagSet: EndOfOctetsEncoder(),
- univ.Boolean.tagSet: IntegerEncoder(),
- univ.Integer.tagSet: IntegerEncoder(),
- univ.BitString.tagSet: BitStringEncoder(),
- univ.OctetString.tagSet: OctetStringEncoder(),
- univ.Null.tagSet: NullEncoder(),
- univ.ObjectIdentifier.tagSet: ObjectIdentifierEncoder(),
- univ.Enumerated.tagSet: IntegerEncoder(),
- univ.Real.tagSet: RealEncoder(),
- # Sequence & Set have same tags as SequenceOf & SetOf
- univ.SequenceOf.tagSet: SequenceOfEncoder(),
- univ.SetOf.tagSet: SequenceOfEncoder(),
- univ.Choice.tagSet: ChoiceEncoder(),
- # character string types
- char.UTF8String.tagSet: OctetStringEncoder(),
- char.NumericString.tagSet: OctetStringEncoder(),
- char.PrintableString.tagSet: OctetStringEncoder(),
- char.TeletexString.tagSet: OctetStringEncoder(),
- char.VideotexString.tagSet: OctetStringEncoder(),
- char.IA5String.tagSet: OctetStringEncoder(),
- char.GraphicString.tagSet: OctetStringEncoder(),
- char.VisibleString.tagSet: OctetStringEncoder(),
- char.GeneralString.tagSet: OctetStringEncoder(),
- char.UniversalString.tagSet: OctetStringEncoder(),
- char.BMPString.tagSet: OctetStringEncoder(),
- # useful types
- useful.GeneralizedTime.tagSet: OctetStringEncoder(),
- useful.UTCTime.tagSet: OctetStringEncoder()
- }
-
-# Type-to-codec map for ambiguous ASN.1 types
-typeMap = {
- univ.Set.typeId: SequenceEncoder(),
- univ.SetOf.typeId: SequenceOfEncoder(),
- univ.Sequence.typeId: SequenceEncoder(),
- univ.SequenceOf.typeId: SequenceOfEncoder(),
- univ.Choice.typeId: ChoiceEncoder(),
- univ.Any.typeId: AnyEncoder()
- }
-
-class Encoder:
- def __init__(self, tagMap, typeMap={}):
- self.__tagMap = tagMap
- self.__typeMap = typeMap
-
- def __call__(self, value, defMode=1, maxChunkSize=0):
- debug.logger & debug.flagEncoder and debug.logger('encoder called for type %s, value:\n%s' % (value.__class__.__name__, value.prettyPrint()))
- tagSet = value.getTagSet()
- if len(tagSet) > 1:
- concreteEncoder = explicitlyTaggedItemEncoder
- else:
- if value.typeId is not None and value.typeId in self.__typeMap:
- concreteEncoder = self.__typeMap[value.typeId]
- elif tagSet in self.__tagMap:
- concreteEncoder = self.__tagMap[tagSet]
- else:
- tagSet = value.baseTagSet
- if tagSet in self.__tagMap:
- concreteEncoder = self.__tagMap[tagSet]
- else:
- raise Error('No encoder for %s' % (value,))
- debug.logger & debug.flagEncoder and debug.logger('using value codec %s chosen by %r' % (concreteEncoder.__class__.__name__, tagSet))
- substrate = concreteEncoder.encode(
- self, value, defMode, maxChunkSize
- )
- debug.logger & debug.flagEncoder and debug.logger('built %s octets of substrate: %s\nencoder completed' % (len(substrate), debug.hexdump(substrate)))
- return substrate
-
-encode = Encoder(tagMap, typeMap)
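-
-# Minimal usage sketch: encode(univ.Integer(12)) yields b'\x02\x01\x0c'
-# (tag 0x02, length 0x01, payload 0x0c).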
diff --git a/python-packages/pyasn1/codec/ber/eoo.py b/python-packages/pyasn1/codec/ber/eoo.py
deleted file mode 100644
index 379be19965..0000000000
--- a/python-packages/pyasn1/codec/ber/eoo.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from pyasn1.type import base, tag
-
-class EndOfOctets(base.AbstractSimpleAsn1Item):
- defaultValue = 0
- tagSet = tag.initTagSet(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x00)
- )
-endOfOctets = EndOfOctets()
diff --git a/python-packages/pyasn1/codec/cer/__init__.py b/python-packages/pyasn1/codec/cer/__init__.py
deleted file mode 100644
index 8c3066b2e6..0000000000
--- a/python-packages/pyasn1/codec/cer/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# This file is necessary to make this directory a package.
diff --git a/python-packages/pyasn1/codec/cer/decoder.py b/python-packages/pyasn1/codec/cer/decoder.py
deleted file mode 100644
index 9fd37c1347..0000000000
--- a/python-packages/pyasn1/codec/cer/decoder.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# CER decoder
-from pyasn1.type import univ
-from pyasn1.codec.ber import decoder
-from pyasn1.compat.octets import oct2int
-from pyasn1 import error
-
-class BooleanDecoder(decoder.AbstractSimpleDecoder):
- protoComponent = univ.Boolean(0)
- def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
- state, decodeFun, substrateFun):
- head, tail = substrate[:length], substrate[length:]
- if not head:
- raise error.PyAsn1Error('Empty substrate')
- byte = oct2int(head[0])
- # CER/DER specifies encoding of TRUE as 0xFF and FALSE as 0x0, while
- # BER allows any non-zero value as TRUE; cf. sections 8.2.2. and 11.1
- # in http://www.itu.int/ITU-T/studygroups/com17/languages/X.690-0207.pdf
- if byte == 0xff:
- value = 1
- elif byte == 0x00:
- value = 0
- else:
- raise error.PyAsn1Error('Boolean CER violation: %s' % byte)
- return self._createComponent(asn1Spec, tagSet, value), tail
-
-tagMap = decoder.tagMap.copy()
-tagMap.update({
- univ.Boolean.tagSet: BooleanDecoder()
- })
-
-typeMap = decoder.typeMap
-
-class Decoder(decoder.Decoder): pass
-
-decode = Decoder(tagMap, decoder.typeMap)
diff --git a/python-packages/pyasn1/codec/cer/encoder.py b/python-packages/pyasn1/codec/cer/encoder.py
deleted file mode 100644
index 4c05130af9..0000000000
--- a/python-packages/pyasn1/codec/cer/encoder.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# CER encoder
-from pyasn1.type import univ
-from pyasn1.codec.ber import encoder
-from pyasn1.compat.octets import int2oct, null
-
-class BooleanEncoder(encoder.IntegerEncoder):
- def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
- if client == 0:
- substrate = int2oct(0)
- else:
- substrate = int2oct(255)
- return substrate, 0
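-
- # Sketch: under this codec encode(univ.Boolean(True)) yields
- # b'\x01\x01\xff' -- CER/DER mandate 0xFF for TRUE, unlike plain BER
- # which accepts any non-zero octet.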
-
-class BitStringEncoder(encoder.BitStringEncoder):
- def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
- return encoder.BitStringEncoder.encodeValue(
- self, encodeFun, client, defMode, 1000
- )
-
-class OctetStringEncoder(encoder.OctetStringEncoder):
- def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
- return encoder.OctetStringEncoder.encodeValue(
- self, encodeFun, client, defMode, 1000
- )
-
-# specialized RealEncoder here
-# specialized GeneralStringEncoder here
-# specialized GeneralizedTimeEncoder here
-# specialized UTCTimeEncoder here
-
-class SetOfEncoder(encoder.SequenceOfEncoder):
- def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
- if isinstance(client, univ.SequenceAndSetBase):
- client.setDefaultComponents()
- client.verifySizeSpec()
- substrate = null; idx = len(client)
- # This is certainly a hack but how else do I distinguish SetOf
- # from Set if they have the same tags&constraints?
- if isinstance(client, univ.SequenceAndSetBase):
- # Set
- comps = []
- while idx > 0:
- idx = idx - 1
- if client[idx] is None: # Optional component
- continue
- if client.getDefaultComponentByPosition(idx) == client[idx]:
- continue
- comps.append(client[idx])
- comps.sort(key=lambda x: isinstance(x, univ.Choice) and \
- x.getMinTagSet() or x.getTagSet())
- for c in comps:
- substrate += encodeFun(c, defMode, maxChunkSize)
- else:
- # SetOf
- compSubs = []
- while idx > 0:
- idx = idx - 1
- compSubs.append(
- encodeFun(client[idx], defMode, maxChunkSize)
- )
- compSubs.sort() # perhaps padding's not needed
- substrate = null
- for compSub in compSubs:
- substrate += compSub
- return substrate, 1
-
-tagMap = encoder.tagMap.copy()
-tagMap.update({
- univ.Boolean.tagSet: BooleanEncoder(),
- univ.BitString.tagSet: BitStringEncoder(),
- univ.OctetString.tagSet: OctetStringEncoder(),
- univ.SetOf().tagSet: SetOfEncoder() # conflicts with Set
- })
-
-typeMap = encoder.typeMap.copy()
-typeMap.update({
- univ.Set.typeId: SetOfEncoder(),
- univ.SetOf.typeId: SetOfEncoder()
- })
-
-class Encoder(encoder.Encoder):
- def __call__(self, client, defMode=0, maxChunkSize=0):
- return encoder.Encoder.__call__(self, client, defMode, maxChunkSize)
-
-encode = Encoder(tagMap, typeMap)
-
-# EncoderFactory queries class instance and builds a map of tags -> encoders
diff --git a/python-packages/pyasn1/codec/der/__init__.py b/python-packages/pyasn1/codec/der/__init__.py
deleted file mode 100644
index 8c3066b2e6..0000000000
--- a/python-packages/pyasn1/codec/der/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# This file is necessary to make this directory a package.
diff --git a/python-packages/pyasn1/codec/der/decoder.py b/python-packages/pyasn1/codec/der/decoder.py
deleted file mode 100644
index 604abec2bc..0000000000
--- a/python-packages/pyasn1/codec/der/decoder.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# DER decoder
-from pyasn1.type import univ
-from pyasn1.codec.cer import decoder
-
-tagMap = decoder.tagMap
-typeMap = decoder.typeMap
-Decoder = decoder.Decoder
-
-decode = Decoder(tagMap, typeMap)
diff --git a/python-packages/pyasn1/codec/der/encoder.py b/python-packages/pyasn1/codec/der/encoder.py
deleted file mode 100644
index 4e5faefad4..0000000000
--- a/python-packages/pyasn1/codec/der/encoder.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# DER encoder
-from pyasn1.type import univ
-from pyasn1.codec.cer import encoder
-
-class SetOfEncoder(encoder.SetOfEncoder):
- def _cmpSetComponents(self, c1, c2):
- tagSet1 = isinstance(c1, univ.Choice) and \
- c1.getEffectiveTagSet() or c1.getTagSet()
- tagSet2 = isinstance(c2, univ.Choice) and \
- c2.getEffectiveTagSet() or c2.getTagSet()
- return cmp(tagSet1, tagSet2)
-
-tagMap = encoder.tagMap.copy()
-tagMap.update({
- # Overload CER encoders with BER ones (a bit hackerish XXX)
- univ.BitString.tagSet: encoder.encoder.BitStringEncoder(),
- univ.OctetString.tagSet: encoder.encoder.OctetStringEncoder(),
- # Set & SetOf have same tags
- univ.SetOf().tagSet: SetOfEncoder()
- })
-
-typeMap = encoder.typeMap
-
-class Encoder(encoder.Encoder):
- def __call__(self, client, defMode=1, maxChunkSize=0):
- return encoder.Encoder.__call__(self, client, defMode, maxChunkSize)
-
-encode = Encoder(tagMap, typeMap)
diff --git a/python-packages/pyasn1/compat/__init__.py b/python-packages/pyasn1/compat/__init__.py
deleted file mode 100644
index 8c3066b2e6..0000000000
--- a/python-packages/pyasn1/compat/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# This file is necessary to make this directory a package.
diff --git a/python-packages/pyasn1/compat/octets.py b/python-packages/pyasn1/compat/octets.py
deleted file mode 100644
index f7f2a29bf5..0000000000
--- a/python-packages/pyasn1/compat/octets.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from sys import version_info
-
-if version_info[0] <= 2:
- int2oct = chr
- ints2octs = lambda s: ''.join([ int2oct(x) for x in s ])
- null = ''
- oct2int = ord
- octs2ints = lambda s: [ oct2int(x) for x in s ]
- str2octs = lambda x: x
- octs2str = lambda x: x
- isOctetsType = lambda s: isinstance(s, str)
-else:
- ints2octs = bytes
- int2oct = lambda x: ints2octs((x,))
- null = ints2octs()
- oct2int = lambda x: x
- octs2ints = lambda s: [ x for x in s ]
- str2octs = lambda x: x.encode()
- octs2str = lambda x: x.decode()
- isOctetsType = lambda s: isinstance(s, bytes)
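-
-# Usage sketch: on Python 3, int2oct(65) == b'A', ints2octs((72, 105)) ==
-# b'Hi' and oct2int(b'Hi'[0]) == 72; on Python 2 the same calls operate
-# on native str objects.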
diff --git a/python-packages/pyasn1/debug.py b/python-packages/pyasn1/debug.py
deleted file mode 100644
index 8f9dadf830..0000000000
--- a/python-packages/pyasn1/debug.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import sys
-from pyasn1.compat.octets import octs2ints
-from pyasn1 import error
-
-flagNone = 0x0000
-flagEncoder = 0x0001
-flagDecoder = 0x0002
-flagAll = 0xffff
-
-flagMap = {
- 'encoder': flagEncoder,
- 'decoder': flagDecoder,
- 'all': flagAll
- }
-
-class Debug:
- defaultPrinter = sys.stderr.write
- def __init__(self, *flags):
- self._flags = flagNone
- self._printer = self.defaultPrinter
- for f in flags:
- if f not in flagMap:
- raise error.PyAsn1Error('bad debug flag %s' % (f,))
- self._flags = self._flags | flagMap[f]
- self('debug category %s enabled' % f)
-
- def __str__(self):
- return 'logger %s, flags %x' % (self._printer, self._flags)
-
- def __call__(self, msg):
- self._printer('DBG: %s\n' % msg)
-
- def __and__(self, flag):
- return self._flags & flag
-
- def __rand__(self, flag):
- return flag & self._flags
-
-logger = 0
-
-def setLogger(l):
- global logger
- logger = l
-
-def hexdump(octets):
- return ' '.join(
- [ '%s%.2X' % (n%16 == 0 and ('\n%.5d: ' % n) or '', x)
- for n,x in zip(range(len(octets)), octs2ints(octets)) ]
- )
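-
-# Sketch: hexdump(b'\x02\x01\x0c') returns '\n00000: 02 01 0C'; a fresh
-# offset prefix is emitted every 16 octets.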
-
-class Scope:
- def __init__(self):
- self._list = []
-
- def __str__(self): return '.'.join(self._list)
-
- def push(self, token):
- self._list.append(token)
-
- def pop(self):
- return self._list.pop()
-
-scope = Scope()
diff --git a/python-packages/pyasn1/error.py b/python-packages/pyasn1/error.py
deleted file mode 100644
index 716406ff63..0000000000
--- a/python-packages/pyasn1/error.py
+++ /dev/null
@@ -1,3 +0,0 @@
-class PyAsn1Error(Exception): pass
-class ValueConstraintError(PyAsn1Error): pass
-class SubstrateUnderrunError(PyAsn1Error): pass
diff --git a/python-packages/pyasn1/type/__init__.py b/python-packages/pyasn1/type/__init__.py
deleted file mode 100644
index 8c3066b2e6..0000000000
--- a/python-packages/pyasn1/type/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# This file is necessary to make this directory a package.
diff --git a/python-packages/pyasn1/type/base.py b/python-packages/pyasn1/type/base.py
deleted file mode 100644
index db31671e82..0000000000
--- a/python-packages/pyasn1/type/base.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# Base classes for ASN.1 types
-import sys
-from pyasn1.type import constraint, tagmap
-from pyasn1 import error
-
-class Asn1Item: pass
-
-class Asn1ItemBase(Asn1Item):
- # Set of tags for this ASN.1 type
- tagSet = ()
-
- # A list of constraint.Constraint instances for checking values
- subtypeSpec = constraint.ConstraintsIntersection()
-
- # Used for ambiguous ASN.1 types identification
- typeId = None
-
- def __init__(self, tagSet=None, subtypeSpec=None):
- if tagSet is None:
- self._tagSet = self.tagSet
- else:
- self._tagSet = tagSet
- if subtypeSpec is None:
- self._subtypeSpec = self.subtypeSpec
- else:
- self._subtypeSpec = subtypeSpec
-
- def _verifySubtypeSpec(self, value, idx=None):
- try:
- self._subtypeSpec(value, idx)
- except error.PyAsn1Error:
- c, i, t = sys.exc_info()
- raise c('%s at %s' % (i, self.__class__.__name__))
-
- def getSubtypeSpec(self): return self._subtypeSpec
-
- def getTagSet(self): return self._tagSet
- def getEffectiveTagSet(self): return self._tagSet # used by untagged types
- def getTagMap(self): return tagmap.TagMap({self._tagSet: self})
-
- def isSameTypeWith(self, other):
- return self is other or \
- self._tagSet == other.getTagSet() and \
- self._subtypeSpec == other.getSubtypeSpec()
- def isSuperTypeOf(self, other):
- """Returns true if argument is a ASN1 subtype of ourselves"""
- return self._tagSet.isSuperTagSetOf(other.getTagSet()) and \
- self._subtypeSpec.isSuperTypeOf(other.getSubtypeSpec())
-
-class __NoValue:
- def __getattr__(self, attr):
- raise error.PyAsn1Error('No value for %s()' % attr)
- def __getitem__(self, i):
- raise error.PyAsn1Error('No value')
-
-noValue = __NoValue()
-
-# Base class for "simple" ASN.1 objects. These are immutable.
-class AbstractSimpleAsn1Item(Asn1ItemBase):
- defaultValue = noValue
- def __init__(self, value=None, tagSet=None, subtypeSpec=None):
- Asn1ItemBase.__init__(self, tagSet, subtypeSpec)
- if value is None or value is noValue:
- value = self.defaultValue
- if value is None or value is noValue:
- self.__hashedValue = value = noValue
- else:
- value = self.prettyIn(value)
- self._verifySubtypeSpec(value)
- self.__hashedValue = hash(value)
- self._value = value
- self._len = None
-
- def __repr__(self):
- if self._value is noValue:
- return self.__class__.__name__ + '()'
- else:
- return self.__class__.__name__ + '(%s)' % (self.prettyOut(self._value),)
- def __str__(self): return str(self._value)
- def __eq__(self, other):
- return self is other and True or self._value == other
- def __ne__(self, other): return self._value != other
- def __lt__(self, other): return self._value < other
- def __le__(self, other): return self._value <= other
- def __gt__(self, other): return self._value > other
- def __ge__(self, other): return self._value >= other
- if sys.version_info[0] <= 2:
- def __nonzero__(self): return bool(self._value)
- else:
- def __bool__(self): return bool(self._value)
- def __hash__(self): return self.__hashedValue
-
- def clone(self, value=None, tagSet=None, subtypeSpec=None):
- if value is None and tagSet is None and subtypeSpec is None:
- return self
- if value is None:
- value = self._value
- if tagSet is None:
- tagSet = self._tagSet
- if subtypeSpec is None:
- subtypeSpec = self._subtypeSpec
- return self.__class__(value, tagSet, subtypeSpec)
-
- def subtype(self, value=None, implicitTag=None, explicitTag=None,
- subtypeSpec=None):
- if value is None:
- value = self._value
- if implicitTag is not None:
- tagSet = self._tagSet.tagImplicitly(implicitTag)
- elif explicitTag is not None:
- tagSet = self._tagSet.tagExplicitly(explicitTag)
- else:
- tagSet = self._tagSet
- if subtypeSpec is None:
- subtypeSpec = self._subtypeSpec
- else:
- subtypeSpec = subtypeSpec + self._subtypeSpec
- return self.__class__(value, tagSet, subtypeSpec)
-
- def prettyIn(self, value): return value
- def prettyOut(self, value): return str(value)
-
- def prettyPrint(self, scope=0): return self.prettyOut(self._value)
- # XXX Compatibility stub
- def prettyPrinter(self, scope=0): return self.prettyPrint(scope)
-
-#
-# Constructed types:
-# * There are five of them: Sequence, SequenceOf/SetOf, Set and Choice
-# * ASN1 types and values are represented by Python class instances
-# * Value initialization is made for defaulted components only
-# * Primary method of component addressing is by-position. Data model for base
-# type is Python sequence. Additional type-specific addressing methods
-# may be implemented for particular types.
-# * SequenceOf and SetOf types do not implement any additional methods
-# * Sequence, Set and Choice types also implement by-identifier addressing
-# * Sequence, Set and Choice types also implement by-asn1-type (tag) addressing
-# * Sequence and Set types may include optional and defaulted
-# components
-# * Constructed types hold a reference to component types used for value
-# verification and ordering.
-# * Component type is a scalar type for SequenceOf/SetOf types and a list
-# of types for Sequence/Set/Choice.
-#
-
-class AbstractConstructedAsn1Item(Asn1ItemBase):
- componentType = None
- sizeSpec = constraint.ConstraintsIntersection()
- def __init__(self, componentType=None, tagSet=None,
- subtypeSpec=None, sizeSpec=None):
- Asn1ItemBase.__init__(self, tagSet, subtypeSpec)
- if componentType is None:
- self._componentType = self.componentType
- else:
- self._componentType = componentType
- if sizeSpec is None:
- self._sizeSpec = self.sizeSpec
- else:
- self._sizeSpec = sizeSpec
- self._componentValues = []
- self._componentValuesSet = 0
-
- def __repr__(self):
- r = self.__class__.__name__ + '()'
- for idx in range(len(self._componentValues)):
- if self._componentValues[idx] is None:
- continue
- r = r + '.setComponentByPosition(%s, %r)' % (
- idx, self._componentValues[idx]
- )
- return r
-
- def __eq__(self, other):
- return self is other and True or self._componentValues == other
- def __ne__(self, other): return self._componentValues != other
- def __lt__(self, other): return self._componentValues < other
- def __le__(self, other): return self._componentValues <= other
- def __gt__(self, other): return self._componentValues > other
- def __ge__(self, other): return self._componentValues >= other
- if sys.version_info[0] <= 2:
- def __nonzero__(self): return bool(self._componentValues)
- else:
- def __bool__(self): return bool(self._componentValues)
-
- def getComponentTagMap(self):
- raise error.PyAsn1Error('Method not implemented')
-
- def _cloneComponentValues(self, myClone, cloneValueFlag): pass
-
- def clone(self, tagSet=None, subtypeSpec=None, sizeSpec=None,
- cloneValueFlag=None):
- if tagSet is None:
- tagSet = self._tagSet
- if subtypeSpec is None:
- subtypeSpec = self._subtypeSpec
- if sizeSpec is None:
- sizeSpec = self._sizeSpec
- r = self.__class__(self._componentType, tagSet, subtypeSpec, sizeSpec)
- if cloneValueFlag:
- self._cloneComponentValues(r, cloneValueFlag)
- return r
-
- def subtype(self, implicitTag=None, explicitTag=None, subtypeSpec=None,
- sizeSpec=None, cloneValueFlag=None):
- if implicitTag is not None:
- tagSet = self._tagSet.tagImplicitly(implicitTag)
- elif explicitTag is not None:
- tagSet = self._tagSet.tagExplicitly(explicitTag)
- else:
- tagSet = self._tagSet
- if subtypeSpec is None:
- subtypeSpec = self._subtypeSpec
- else:
- subtypeSpec = subtypeSpec + self._subtypeSpec
- if sizeSpec is None:
- sizeSpec = self._sizeSpec
- else:
- sizeSpec = sizeSpec + self._sizeSpec
- r = self.__class__(self._componentType, tagSet, subtypeSpec, sizeSpec)
- if cloneValueFlag:
- self._cloneComponentValues(r, cloneValueFlag)
- return r
-
- def _verifyComponent(self, idx, value): pass
-
- def verifySizeSpec(self): self._sizeSpec(self)
-
- def getComponentByPosition(self, idx):
- raise error.PyAsn1Error('Method not implemented')
- def setComponentByPosition(self, idx, value, verifyConstraints=True):
- raise error.PyAsn1Error('Method not implemented')
-
- def getComponentType(self): return self._componentType
-
- def __getitem__(self, idx): return self.getComponentByPosition(idx)
- def __setitem__(self, idx, value): self.setComponentByPosition(idx, value)
-
- def __len__(self): return len(self._componentValues)
-
- def clear(self):
- self._componentValues = []
- self._componentValuesSet = 0
-
- def setDefaultComponents(self): pass
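
Note: the comment block above names by-position access as the primary data
model for constructed types. A minimal sketch of that protocol, using the
SequenceOf type deleted further down in univ.py (the seq name is
illustrative):

    from pyasn1.type import univ

    # SequenceOf models its value as a Python sequence; __setitem__ and
    # __getitem__ delegate to set/getComponentByPosition.
    seq = univ.SequenceOf(componentType=univ.Integer())
    seq.setComponentByPosition(0, 7)   # a bare 7 is cloned into an Integer
    seq[1] = 42
    print(len(seq), seq[0], seq[1])    # -> 2 7 42
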
diff --git a/python-packages/pyasn1/type/char.py b/python-packages/pyasn1/type/char.py
deleted file mode 100644
index ae112f8bd3..0000000000
--- a/python-packages/pyasn1/type/char.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# ASN.1 "character string" types
-from pyasn1.type import univ, tag
-
-class UTF8String(univ.OctetString):
- tagSet = univ.OctetString.tagSet.tagImplicitly(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
- )
- encoding = "utf-8"
-
-class NumericString(univ.OctetString):
- tagSet = univ.OctetString.tagSet.tagImplicitly(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 18)
- )
-
-class PrintableString(univ.OctetString):
- tagSet = univ.OctetString.tagSet.tagImplicitly(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 19)
- )
-
-class TeletexString(univ.OctetString):
- tagSet = univ.OctetString.tagSet.tagImplicitly(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 20)
- )
-
-
-class VideotexString(univ.OctetString):
- tagSet = univ.OctetString.tagSet.tagImplicitly(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 21)
- )
-
-class IA5String(univ.OctetString):
- tagSet = univ.OctetString.tagSet.tagImplicitly(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 22)
- )
-
-class GraphicString(univ.OctetString):
- tagSet = univ.OctetString.tagSet.tagImplicitly(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 25)
- )
-
-class VisibleString(univ.OctetString):
- tagSet = univ.OctetString.tagSet.tagImplicitly(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 26)
- )
-
-class GeneralString(univ.OctetString):
- tagSet = univ.OctetString.tagSet.tagImplicitly(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 27)
- )
-
-class UniversalString(univ.OctetString):
- tagSet = univ.OctetString.tagSet.tagImplicitly(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 28)
- )
- encoding = "utf-32-be"
-
-class BMPString(univ.OctetString):
- tagSet = univ.OctetString.tagSet.tagImplicitly(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 30)
- )
- encoding = "utf-16-be"
diff --git a/python-packages/pyasn1/type/constraint.py b/python-packages/pyasn1/type/constraint.py
deleted file mode 100644
index 66873937d8..0000000000
--- a/python-packages/pyasn1/type/constraint.py
+++ /dev/null
@@ -1,200 +0,0 @@
-#
-# ASN.1 subtype constraints classes.
-#
-# Constraints are relatively rare, but every ASN.1 object
-# constantly checks whether it has any constraints and
-# whether they apply to the object.
-#
-# What we're going to do is define objects/functions that
-# can be called unconditionally if they are present, and that
-# are simply not present if there are no constraints.
-#
-# Original concept and code by Mike C. Fletcher.
-#
-import sys
-from pyasn1.type import error
-
-class AbstractConstraint:
- """Abstract base-class for constraint objects
-
- Constraints should be stored in a simple sequence in the
- namespace of their client Asn1Item sub-classes.
- """
- def __init__(self, *values):
- self._valueMap = {}
- self._setValues(values)
- self.__hashedValues = None
- def __call__(self, value, idx=None):
- try:
- self._testValue(value, idx)
- except error.ValueConstraintError:
- raise error.ValueConstraintError(
- '%s failed at: \"%s\"' % (self, sys.exc_info()[1])
- )
- def __repr__(self):
- return '%s(%s)' % (
- self.__class__.__name__,
- ', '.join([repr(x) for x in self._values])
- )
- def __eq__(self, other):
- return self is other and True or self._values == other
- def __ne__(self, other): return self._values != other
- def __lt__(self, other): return self._values < other
- def __le__(self, other): return self._values <= other
- def __gt__(self, other): return self._values > other
- def __ge__(self, other): return self._values >= other
- if sys.version_info[0] <= 2:
- def __nonzero__(self): return bool(self._values)
- else:
- def __bool__(self): return bool(self._values)
-
- def __hash__(self):
- if self.__hashedValues is None:
- self.__hashedValues = hash((self.__class__.__name__, self._values))
- return self.__hashedValues
-
- def _setValues(self, values): self._values = values
- def _testValue(self, value, idx):
- raise error.ValueConstraintError(value)
-
- # Constraints derivation logic
- def getValueMap(self): return self._valueMap
- def isSuperTypeOf(self, otherConstraint):
- return self in otherConstraint.getValueMap() or \
- otherConstraint is self or otherConstraint == self
- def isSubTypeOf(self, otherConstraint):
- return otherConstraint in self._valueMap or \
- otherConstraint is self or otherConstraint == self
-
-class SingleValueConstraint(AbstractConstraint):
- """Value must be part of defined values constraint"""
- def _testValue(self, value, idx):
- # XXX index vals for performance?
- if value not in self._values:
- raise error.ValueConstraintError(value)
-
-class ContainedSubtypeConstraint(AbstractConstraint):
- """Value must satisfy all of defined set of constraints"""
- def _testValue(self, value, idx):
- for c in self._values:
- c(value, idx)
-
-class ValueRangeConstraint(AbstractConstraint):
- """Value must be within start and stop values (inclusive)"""
- def _testValue(self, value, idx):
- if value < self.start or value > self.stop:
- raise error.ValueConstraintError(value)
-
- def _setValues(self, values):
- if len(values) != 2:
- raise error.PyAsn1Error(
- '%s: bad constraint values' % (self.__class__.__name__,)
- )
- self.start, self.stop = values
- if self.start > self.stop:
- raise error.PyAsn1Error(
- '%s: screwed constraint values (start > stop): %s > %s' % (
- self.__class__.__name__,
- self.start, self.stop
- )
- )
- AbstractConstraint._setValues(self, values)
-
-class ValueSizeConstraint(ValueRangeConstraint):
- """len(value) must be within start and stop values (inclusive)"""
- def _testValue(self, value, idx):
- l = len(value)
- if l < self.start or l > self.stop:
- raise error.ValueConstraintError(value)
-
-class PermittedAlphabetConstraint(SingleValueConstraint):
- def _setValues(self, values):
- self._values = ()
- for v in values:
- self._values = self._values + tuple(v)
-
- def _testValue(self, value, idx):
- for v in value:
- if v not in self._values:
- raise error.ValueConstraintError(value)
-
-# This is a bit kludgy, meaning two op modes within a single constraint
-class InnerTypeConstraint(AbstractConstraint):
- """Value must satisfy type and presense constraints"""
- def _testValue(self, value, idx):
- if self.__singleTypeConstraint:
- self.__singleTypeConstraint(value)
- elif self.__multipleTypeConstraint:
- if idx not in self.__multipleTypeConstraint:
- raise error.ValueConstraintError(value)
- constraint, status = self.__multipleTypeConstraint[idx]
-            if status == 'ABSENT': # XXX presence is not checked!
- raise error.ValueConstraintError(value)
- constraint(value)
-
- def _setValues(self, values):
- self.__multipleTypeConstraint = {}
- self.__singleTypeConstraint = None
- for v in values:
- if isinstance(v, tuple):
- self.__multipleTypeConstraint[v[0]] = v[1], v[2]
- else:
- self.__singleTypeConstraint = v
- AbstractConstraint._setValues(self, values)
-
-# Boolean ops on constraints
-
-class ConstraintsExclusion(AbstractConstraint):
- """Value must not fit the single constraint"""
- def _testValue(self, value, idx):
- try:
- self._values[0](value, idx)
- except error.ValueConstraintError:
- return
- else:
- raise error.ValueConstraintError(value)
-
- def _setValues(self, values):
- if len(values) != 1:
- raise error.PyAsn1Error('Single constraint expected')
- AbstractConstraint._setValues(self, values)
-
-class AbstractConstraintSet(AbstractConstraint):
- """Value must not satisfy the single constraint"""
- def __getitem__(self, idx): return self._values[idx]
-
- def __add__(self, value): return self.__class__(self, value)
- def __radd__(self, value): return self.__class__(self, value)
-
- def __len__(self): return len(self._values)
-
- # Constraints inclusion in sets
-
- def _setValues(self, values):
- self._values = values
- for v in values:
- self._valueMap[v] = 1
- self._valueMap.update(v.getValueMap())
-
-class ConstraintsIntersection(AbstractConstraintSet):
- """Value must satisfy all constraints"""
- def _testValue(self, value, idx):
- for v in self._values:
- v(value, idx)
-
-class ConstraintsUnion(AbstractConstraintSet):
- """Value must satisfy at least one constraint"""
- def _testValue(self, value, idx):
- for v in self._values:
- try:
- v(value, idx)
- except error.ValueConstraintError:
- pass
- else:
- return
- raise error.ValueConstraintError(
- 'all of %s failed for \"%s\"' % (self._values, value)
- )
-
-# XXX
-# add tests for type check
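
Note: the module header above explains that constraints are callables
invoked whenever a value is bound to a type. A sketch of composing them
from the classes in this hunk (the Percent name is illustrative):

    from pyasn1.type import univ, constraint

    # Restrict an Integer to 0..100; subtype() intersects the new
    # constraint with the type's existing (empty) ConstraintsIntersection.
    Percent = univ.Integer().subtype(
        subtypeSpec=constraint.ValueRangeConstraint(0, 100))

    Percent.clone(42)     # passes the range check on assignment
    Percent.clone(1000)   # raises ValueConstraintError (a PyAsn1Error)
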
diff --git a/python-packages/pyasn1/type/error.py b/python-packages/pyasn1/type/error.py
deleted file mode 100644
index 3e68484472..0000000000
--- a/python-packages/pyasn1/type/error.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from pyasn1.error import PyAsn1Error
-
-class ValueConstraintError(PyAsn1Error): pass
diff --git a/python-packages/pyasn1/type/namedtype.py b/python-packages/pyasn1/type/namedtype.py
deleted file mode 100644
index 48967a5fe2..0000000000
--- a/python-packages/pyasn1/type/namedtype.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# NamedType specification for constructed types
-import sys
-from pyasn1.type import tagmap
-from pyasn1 import error
-
-class NamedType:
- isOptional = 0
- isDefaulted = 0
- def __init__(self, name, t):
- self.__name = name; self.__type = t
- def __repr__(self): return '%s(%s, %s)' % (
- self.__class__.__name__, self.__name, self.__type
- )
- def getType(self): return self.__type
- def getName(self): return self.__name
- def __getitem__(self, idx):
- if idx == 0: return self.__name
- if idx == 1: return self.__type
- raise IndexError()
-
-class OptionalNamedType(NamedType):
- isOptional = 1
-class DefaultedNamedType(NamedType):
- isDefaulted = 1
-
-class NamedTypes:
- def __init__(self, *namedTypes):
- self.__namedTypes = namedTypes
- self.__namedTypesLen = len(self.__namedTypes)
- self.__minTagSet = None
- self.__tagToPosIdx = {}; self.__nameToPosIdx = {}
- self.__tagMap = { False: None, True: None }
- self.__ambigiousTypes = {}
-
- def __repr__(self):
- r = '%s(' % self.__class__.__name__
- for n in self.__namedTypes:
- r = r + '%r, ' % (n,)
- return r + ')'
-
- def __getitem__(self, idx): return self.__namedTypes[idx]
-
- if sys.version_info[0] <= 2:
- def __nonzero__(self): return bool(self.__namedTypesLen)
- else:
- def __bool__(self): return bool(self.__namedTypesLen)
- def __len__(self): return self.__namedTypesLen
-
- def getTypeByPosition(self, idx):
- if idx < 0 or idx >= self.__namedTypesLen:
- raise error.PyAsn1Error('Type position out of range')
- else:
- return self.__namedTypes[idx].getType()
-
- def getPositionByType(self, tagSet):
- if not self.__tagToPosIdx:
- idx = self.__namedTypesLen
- while idx > 0:
- idx = idx - 1
- tagMap = self.__namedTypes[idx].getType().getTagMap()
- for t in tagMap.getPosMap():
- if t in self.__tagToPosIdx:
- raise error.PyAsn1Error('Duplicate type %s' % (t,))
- self.__tagToPosIdx[t] = idx
- try:
- return self.__tagToPosIdx[tagSet]
- except KeyError:
- raise error.PyAsn1Error('Type %s not found' % (tagSet,))
-
- def getNameByPosition(self, idx):
- try:
- return self.__namedTypes[idx].getName()
- except IndexError:
- raise error.PyAsn1Error('Type position out of range')
- def getPositionByName(self, name):
- if not self.__nameToPosIdx:
- idx = self.__namedTypesLen
- while idx > 0:
- idx = idx - 1
- n = self.__namedTypes[idx].getName()
- if n in self.__nameToPosIdx:
- raise error.PyAsn1Error('Duplicate name %s' % (n,))
- self.__nameToPosIdx[n] = idx
- try:
- return self.__nameToPosIdx[name]
- except KeyError:
- raise error.PyAsn1Error('Name %s not found' % (name,))
-
- def __buildAmbigiousTagMap(self):
- ambigiousTypes = ()
- idx = self.__namedTypesLen
- while idx > 0:
- idx = idx - 1
- t = self.__namedTypes[idx]
- if t.isOptional or t.isDefaulted:
- ambigiousTypes = (t, ) + ambigiousTypes
- else:
- ambigiousTypes = (t, )
- self.__ambigiousTypes[idx] = NamedTypes(*ambigiousTypes)
-
- def getTagMapNearPosition(self, idx):
- if not self.__ambigiousTypes: self.__buildAmbigiousTagMap()
- try:
- return self.__ambigiousTypes[idx].getTagMap()
- except KeyError:
- raise error.PyAsn1Error('Type position out of range')
-
- def getPositionNearType(self, tagSet, idx):
- if not self.__ambigiousTypes: self.__buildAmbigiousTagMap()
- try:
- return idx+self.__ambigiousTypes[idx].getPositionByType(tagSet)
- except KeyError:
- raise error.PyAsn1Error('Type position out of range')
-
- def genMinTagSet(self):
- if self.__minTagSet is None:
- for t in self.__namedTypes:
- __type = t.getType()
- tagSet = getattr(__type,'getMinTagSet',__type.getTagSet)()
- if self.__minTagSet is None or tagSet < self.__minTagSet:
- self.__minTagSet = tagSet
- return self.__minTagSet
-
- def getTagMap(self, uniq=False):
- if self.__tagMap[uniq] is None:
- tagMap = tagmap.TagMap()
- for nt in self.__namedTypes:
- tagMap = tagMap.clone(
- nt.getType(), nt.getType().getTagMap(), uniq
- )
- self.__tagMap[uniq] = tagMap
- return self.__tagMap[uniq]
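
Note: NamedTypes supplies the by-name addressing that Sequence, Set and
Choice layer over by-position access. A typical declaration, sketched with
the deleted API (Record is an illustrative name):

    from pyasn1.type import univ, namedtype

    class Record(univ.Sequence):
        componentType = namedtype.NamedTypes(
            namedtype.NamedType('id', univ.Integer()),
            namedtype.OptionalNamedType('note', univ.OctetString()))

    rec = Record()
    rec.setComponentByName('id', 7)      # resolved via getPositionByName
    print(rec.getComponentByName('id'))  # -> 7
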
diff --git a/python-packages/pyasn1/type/namedval.py b/python-packages/pyasn1/type/namedval.py
deleted file mode 100644
index d0fea7cc7c..0000000000
--- a/python-packages/pyasn1/type/namedval.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# ASN.1 named integers
-from pyasn1 import error
-
-__all__ = [ 'NamedValues' ]
-
-class NamedValues:
- def __init__(self, *namedValues):
- self.nameToValIdx = {}; self.valToNameIdx = {}
- self.namedValues = ()
- automaticVal = 1
- for namedValue in namedValues:
- if isinstance(namedValue, tuple):
- name, val = namedValue
- else:
- name = namedValue
- val = automaticVal
- if name in self.nameToValIdx:
- raise error.PyAsn1Error('Duplicate name %s' % (name,))
- self.nameToValIdx[name] = val
- if val in self.valToNameIdx:
- raise error.PyAsn1Error('Duplicate value %s=%s' % (name, val))
- self.valToNameIdx[val] = name
- self.namedValues = self.namedValues + ((name, val),)
- automaticVal = automaticVal + 1
- def __str__(self): return str(self.namedValues)
-
- def getName(self, value):
- if value in self.valToNameIdx:
- return self.valToNameIdx[value]
-
- def getValue(self, name):
- if name in self.nameToValIdx:
- return self.nameToValIdx[name]
-
- def __getitem__(self, i): return self.namedValues[i]
- def __len__(self): return len(self.namedValues)
-
- def __add__(self, namedValues):
- return self.__class__(*self.namedValues + namedValues)
- def __radd__(self, namedValues):
- return self.__class__(*namedValues + tuple(self))
-
- def clone(self, *namedValues):
- return self.__class__(*tuple(self) + namedValues)
-
-# XXX clone/subtype?
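
Note: NamedValues attaches symbolic names to integer values;
Integer.prettyIn resolves names on input and prettyOut restores them for
display. A short sketch (ErrorStatus is an illustrative name):

    from pyasn1.type import univ, namedval

    ErrorStatus = univ.Integer(namedValues=namedval.NamedValues(
        ('noError', 0), ('tooBig', 1)))

    v = ErrorStatus.clone('tooBig')  # the name is resolved to 1 by prettyIn
    print(int(v))                    # -> 1
    print(v.prettyPrint())           # -> 'tooBig' (restored by prettyOut)
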
diff --git a/python-packages/pyasn1/type/tag.py b/python-packages/pyasn1/type/tag.py
deleted file mode 100644
index 1144907fa1..0000000000
--- a/python-packages/pyasn1/type/tag.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# ASN.1 types tags
-from operator import getitem
-from pyasn1 import error
-
-tagClassUniversal = 0x00
-tagClassApplication = 0x40
-tagClassContext = 0x80
-tagClassPrivate = 0xC0
-
-tagFormatSimple = 0x00
-tagFormatConstructed = 0x20
-
-tagCategoryImplicit = 0x01
-tagCategoryExplicit = 0x02
-tagCategoryUntagged = 0x04
-
-class Tag:
- def __init__(self, tagClass, tagFormat, tagId):
- if tagId < 0:
- raise error.PyAsn1Error(
- 'Negative tag ID (%s) not allowed' % (tagId,)
- )
- self.__tag = (tagClass, tagFormat, tagId)
- self.uniq = (tagClass, tagId)
- self.__hashedUniqTag = hash(self.uniq)
-
- def __repr__(self):
- return '%s(tagClass=%s, tagFormat=%s, tagId=%s)' % (
- (self.__class__.__name__,) + self.__tag
- )
-    # This is really a hotspot -- expose a public "uniq" attribute to save on
- # function calls
- def __eq__(self, other): return self.uniq == other.uniq
- def __ne__(self, other): return self.uniq != other.uniq
- def __lt__(self, other): return self.uniq < other.uniq
- def __le__(self, other): return self.uniq <= other.uniq
- def __gt__(self, other): return self.uniq > other.uniq
- def __ge__(self, other): return self.uniq >= other.uniq
- def __hash__(self): return self.__hashedUniqTag
- def __getitem__(self, idx): return self.__tag[idx]
- def __and__(self, otherTag):
- (tagClass, tagFormat, tagId) = otherTag
- return self.__class__(
- self.__tag&tagClass, self.__tag&tagFormat, self.__tag&tagId
- )
- def __or__(self, otherTag):
- (tagClass, tagFormat, tagId) = otherTag
- return self.__class__(
- self.__tag[0]|tagClass,
- self.__tag[1]|tagFormat,
- self.__tag[2]|tagId
- )
- def asTuple(self): return self.__tag # __getitem__() is slow
-
-class TagSet:
- def __init__(self, baseTag=(), *superTags):
- self.__baseTag = baseTag
- self.__superTags = superTags
- self.__hashedSuperTags = hash(superTags)
- _uniq = ()
- for t in superTags:
- _uniq = _uniq + t.uniq
- self.uniq = _uniq
- self.__lenOfSuperTags = len(superTags)
-
- def __repr__(self):
- return '%s(%s)' % (
- self.__class__.__name__,
- ', '.join([repr(x) for x in self.__superTags])
- )
-
- def __add__(self, superTag):
- return self.__class__(
- self.__baseTag, *self.__superTags + (superTag,)
- )
- def __radd__(self, superTag):
- return self.__class__(
- self.__baseTag, *(superTag,) + self.__superTags
- )
-
- def tagExplicitly(self, superTag):
- tagClass, tagFormat, tagId = superTag
- if tagClass == tagClassUniversal:
- raise error.PyAsn1Error(
- 'Can\'t tag with UNIVERSAL-class tag'
- )
- if tagFormat != tagFormatConstructed:
- superTag = Tag(tagClass, tagFormatConstructed, tagId)
- return self + superTag
-
- def tagImplicitly(self, superTag):
- tagClass, tagFormat, tagId = superTag
- if self.__superTags:
- superTag = Tag(tagClass, self.__superTags[-1][1], tagId)
- return self[:-1] + superTag
-
- def getBaseTag(self): return self.__baseTag
- def __getitem__(self, idx):
- if isinstance(idx, slice):
- return self.__class__(
- self.__baseTag, *getitem(self.__superTags, idx)
- )
- return self.__superTags[idx]
- def __eq__(self, other): return self.uniq == other.uniq
- def __ne__(self, other): return self.uniq != other.uniq
- def __lt__(self, other): return self.uniq < other.uniq
- def __le__(self, other): return self.uniq <= other.uniq
- def __gt__(self, other): return self.uniq > other.uniq
- def __ge__(self, other): return self.uniq >= other.uniq
- def __hash__(self): return self.__hashedSuperTags
- def __len__(self): return self.__lenOfSuperTags
- def isSuperTagSetOf(self, tagSet):
- if len(tagSet) < self.__lenOfSuperTags:
- return
- idx = self.__lenOfSuperTags - 1
- while idx >= 0:
- if self.__superTags[idx] != tagSet[idx]:
- return
- idx = idx - 1
- return 1
-
-def initTagSet(tag): return TagSet(tag, tag)
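
Note: tagImplicitly/tagExplicitly above encode the core BER rule --
implicit tagging replaces the outermost tag, explicit tagging appends a
constructed wrapper tag. A sketch:

    from pyasn1.type import univ, tag

    ctx0 = tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)

    implicit = univ.Integer().subtype(implicitTag=ctx0)
    explicit = univ.Integer().subtype(explicitTag=ctx0)

    print(len(implicit.getTagSet()))  # -> 1: the INTEGER tag was replaced
    print(len(explicit.getTagSet()))  # -> 2: one more tag was appended
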
diff --git a/python-packages/pyasn1/type/tagmap.py b/python-packages/pyasn1/type/tagmap.py
deleted file mode 100644
index 7cec3a10e4..0000000000
--- a/python-packages/pyasn1/type/tagmap.py
+++ /dev/null
@@ -1,52 +0,0 @@
-from pyasn1 import error
-
-class TagMap:
- def __init__(self, posMap={}, negMap={}, defType=None):
- self.__posMap = posMap.copy()
- self.__negMap = negMap.copy()
- self.__defType = defType
-
- def __contains__(self, tagSet):
- return tagSet in self.__posMap or \
- self.__defType is not None and tagSet not in self.__negMap
-
- def __getitem__(self, tagSet):
- if tagSet in self.__posMap:
- return self.__posMap[tagSet]
- elif tagSet in self.__negMap:
- raise error.PyAsn1Error('Key in negative map')
- elif self.__defType is not None:
- return self.__defType
- else:
- raise KeyError()
-
- def __repr__(self):
- s = '%r/%r' % (self.__posMap, self.__negMap)
- if self.__defType is not None:
- s = s + '/%r' % (self.__defType,)
- return s
-
- def clone(self, parentType, tagMap, uniq=False):
- if self.__defType is not None and tagMap.getDef() is not None:
- raise error.PyAsn1Error('Duplicate default value at %s' % (self,))
- if tagMap.getDef() is not None:
- defType = tagMap.getDef()
- else:
- defType = self.__defType
-
- posMap = self.__posMap.copy()
- for k in tagMap.getPosMap():
- if uniq and k in posMap:
- raise error.PyAsn1Error('Duplicate positive key %s' % (k,))
- posMap[k] = parentType
-
- negMap = self.__negMap.copy()
- negMap.update(tagMap.getNegMap())
-
- return self.__class__(
- posMap, negMap, defType,
- )
-
- def getPosMap(self): return self.__posMap.copy()
- def getNegMap(self): return self.__negMap.copy()
- def getDef(self): return self.__defType
diff --git a/python-packages/pyasn1/type/univ.py b/python-packages/pyasn1/type/univ.py
deleted file mode 100644
index 9cd16f8a2a..0000000000
--- a/python-packages/pyasn1/type/univ.py
+++ /dev/null
@@ -1,1042 +0,0 @@
-# ASN.1 "universal" data types
-import operator, sys
-from pyasn1.type import base, tag, constraint, namedtype, namedval, tagmap
-from pyasn1.codec.ber import eoo
-from pyasn1.compat import octets
-from pyasn1 import error
-
-# "Simple" ASN.1 types (yet incomplete)
-
-class Integer(base.AbstractSimpleAsn1Item):
- tagSet = baseTagSet = tag.initTagSet(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x02)
- )
- namedValues = namedval.NamedValues()
- def __init__(self, value=None, tagSet=None, subtypeSpec=None,
- namedValues=None):
- if namedValues is None:
- self.__namedValues = self.namedValues
- else:
- self.__namedValues = namedValues
- base.AbstractSimpleAsn1Item.__init__(
- self, value, tagSet, subtypeSpec
- )
-
- def __and__(self, value): return self.clone(self._value & value)
- def __rand__(self, value): return self.clone(value & self._value)
- def __or__(self, value): return self.clone(self._value | value)
- def __ror__(self, value): return self.clone(value | self._value)
- def __xor__(self, value): return self.clone(self._value ^ value)
- def __rxor__(self, value): return self.clone(value ^ self._value)
- def __lshift__(self, value): return self.clone(self._value << value)
- def __rshift__(self, value): return self.clone(self._value >> value)
-
- def __add__(self, value): return self.clone(self._value + value)
- def __radd__(self, value): return self.clone(value + self._value)
- def __sub__(self, value): return self.clone(self._value - value)
- def __rsub__(self, value): return self.clone(value - self._value)
- def __mul__(self, value): return self.clone(self._value * value)
- def __rmul__(self, value): return self.clone(value * self._value)
- def __mod__(self, value): return self.clone(self._value % value)
- def __rmod__(self, value): return self.clone(value % self._value)
- def __pow__(self, value, modulo=None): return self.clone(pow(self._value, value, modulo))
- def __rpow__(self, value): return self.clone(pow(value, self._value))
-
- if sys.version_info[0] <= 2:
- def __div__(self, value): return self.clone(self._value // value)
- def __rdiv__(self, value): return self.clone(value // self._value)
- else:
- def __truediv__(self, value): return self.clone(self._value / value)
- def __rtruediv__(self, value): return self.clone(value / self._value)
- def __divmod__(self, value): return self.clone(self._value // value)
- def __rdivmod__(self, value): return self.clone(value // self._value)
-
- __hash__ = base.AbstractSimpleAsn1Item.__hash__
-
- def __int__(self): return int(self._value)
- if sys.version_info[0] <= 2:
- def __long__(self): return long(self._value)
- def __float__(self): return float(self._value)
- def __abs__(self): return abs(self._value)
- def __index__(self): return int(self._value)
-
- def __lt__(self, value): return self._value < value
- def __le__(self, value): return self._value <= value
- def __eq__(self, value): return self._value == value
- def __ne__(self, value): return self._value != value
- def __gt__(self, value): return self._value > value
- def __ge__(self, value): return self._value >= value
-
- def prettyIn(self, value):
- if not isinstance(value, str):
- try:
- return int(value)
- except:
- raise error.PyAsn1Error(
- 'Can\'t coerce %s into integer: %s' % (value, sys.exc_info()[1])
- )
- r = self.__namedValues.getValue(value)
- if r is not None:
- return r
- try:
- return int(value)
- except:
- raise error.PyAsn1Error(
- 'Can\'t coerce %s into integer: %s' % (value, sys.exc_info()[1])
- )
-
- def prettyOut(self, value):
- r = self.__namedValues.getName(value)
- return r is None and str(value) or repr(r)
-
- def getNamedValues(self): return self.__namedValues
-
- def clone(self, value=None, tagSet=None, subtypeSpec=None,
- namedValues=None):
- if value is None and tagSet is None and subtypeSpec is None \
- and namedValues is None:
- return self
- if value is None:
- value = self._value
- if tagSet is None:
- tagSet = self._tagSet
- if subtypeSpec is None:
- subtypeSpec = self._subtypeSpec
- if namedValues is None:
- namedValues = self.__namedValues
- return self.__class__(value, tagSet, subtypeSpec, namedValues)
-
- def subtype(self, value=None, implicitTag=None, explicitTag=None,
- subtypeSpec=None, namedValues=None):
- if value is None:
- value = self._value
- if implicitTag is not None:
- tagSet = self._tagSet.tagImplicitly(implicitTag)
- elif explicitTag is not None:
- tagSet = self._tagSet.tagExplicitly(explicitTag)
- else:
- tagSet = self._tagSet
- if subtypeSpec is None:
- subtypeSpec = self._subtypeSpec
- else:
- subtypeSpec = subtypeSpec + self._subtypeSpec
- if namedValues is None:
- namedValues = self.__namedValues
- else:
- namedValues = namedValues + self.__namedValues
- return self.__class__(value, tagSet, subtypeSpec, namedValues)
-
-class Boolean(Integer):
- tagSet = baseTagSet = tag.initTagSet(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x01),
- )
- subtypeSpec = Integer.subtypeSpec+constraint.SingleValueConstraint(0,1)
- namedValues = Integer.namedValues.clone(('False', 0), ('True', 1))
-
-class BitString(base.AbstractSimpleAsn1Item):
- tagSet = baseTagSet = tag.initTagSet(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x03)
- )
- namedValues = namedval.NamedValues()
- def __init__(self, value=None, tagSet=None, subtypeSpec=None,
- namedValues=None):
- if namedValues is None:
- self.__namedValues = self.namedValues
- else:
- self.__namedValues = namedValues
- base.AbstractSimpleAsn1Item.__init__(
- self, value, tagSet, subtypeSpec
- )
-
- def clone(self, value=None, tagSet=None, subtypeSpec=None,
- namedValues=None):
- if value is None and tagSet is None and subtypeSpec is None \
- and namedValues is None:
- return self
- if value is None:
- value = self._value
- if tagSet is None:
- tagSet = self._tagSet
- if subtypeSpec is None:
- subtypeSpec = self._subtypeSpec
- if namedValues is None:
- namedValues = self.__namedValues
- return self.__class__(value, tagSet, subtypeSpec, namedValues)
-
- def subtype(self, value=None, implicitTag=None, explicitTag=None,
- subtypeSpec=None, namedValues=None):
- if value is None:
- value = self._value
- if implicitTag is not None:
- tagSet = self._tagSet.tagImplicitly(implicitTag)
- elif explicitTag is not None:
- tagSet = self._tagSet.tagExplicitly(explicitTag)
- else:
- tagSet = self._tagSet
- if subtypeSpec is None:
- subtypeSpec = self._subtypeSpec
- else:
- subtypeSpec = subtypeSpec + self._subtypeSpec
- if namedValues is None:
- namedValues = self.__namedValues
- else:
- namedValues = namedValues + self.__namedValues
- return self.__class__(value, tagSet, subtypeSpec, namedValues)
-
- def __str__(self): return str(tuple(self))
-
- # Immutable sequence object protocol
-
- def __len__(self):
- if self._len is None:
- self._len = len(self._value)
- return self._len
- def __getitem__(self, i):
- if isinstance(i, slice):
- return self.clone(operator.getitem(self._value, i))
- else:
- return self._value[i]
-
- def __add__(self, value): return self.clone(self._value + value)
- def __radd__(self, value): return self.clone(value + self._value)
- def __mul__(self, value): return self.clone(self._value * value)
- def __rmul__(self, value): return self * value
-
- def prettyIn(self, value):
- r = []
- if not value:
- return ()
- elif isinstance(value, str):
- if value[0] == '\'':
- if value[-2:] == '\'B':
- for v in value[1:-2]:
- if v == '0':
- r.append(0)
- elif v == '1':
- r.append(1)
- else:
- raise error.PyAsn1Error(
- 'Non-binary BIT STRING initializer %s' % (v,)
- )
- return tuple(r)
- elif value[-2:] == '\'H':
- for v in value[1:-2]:
- i = 4
- v = int(v, 16)
- while i:
- i = i - 1
- r.append((v>>i)&0x01)
- return tuple(r)
- else:
- raise error.PyAsn1Error(
- 'Bad BIT STRING value notation %s' % (value,)
- )
- else:
- for i in value.split(','):
- j = self.__namedValues.getValue(i)
- if j is None:
- raise error.PyAsn1Error(
- 'Unknown bit identifier \'%s\'' % (i,)
- )
- if j >= len(r):
- r.extend([0]*(j-len(r)+1))
- r[j] = 1
- return tuple(r)
- elif isinstance(value, (tuple, list)):
- r = tuple(value)
- for b in r:
- if b and b != 1:
- raise error.PyAsn1Error(
- 'Non-binary BitString initializer \'%s\'' % (r,)
- )
- return r
- elif isinstance(value, BitString):
- return tuple(value)
- else:
- raise error.PyAsn1Error(
- 'Bad BitString initializer type \'%s\'' % (value,)
- )
-
- def prettyOut(self, value):
- return '\"\'%s\'B\"' % ''.join([str(x) for x in value])
-
-class OctetString(base.AbstractSimpleAsn1Item):
- tagSet = baseTagSet = tag.initTagSet(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x04)
- )
- defaultBinValue = defaultHexValue = base.noValue
- encoding = 'us-ascii'
- def __init__(self, value=None, tagSet=None, subtypeSpec=None,
- encoding=None, binValue=None, hexValue=None):
- if encoding is None:
- self._encoding = self.encoding
- else:
- self._encoding = encoding
- if binValue is not None:
- value = self.fromBinaryString(binValue)
- if hexValue is not None:
- value = self.fromHexString(hexValue)
- if value is None or value is base.noValue:
- value = self.defaultHexValue
- if value is None or value is base.noValue:
- value = self.defaultBinValue
- self.__intValue = None
- base.AbstractSimpleAsn1Item.__init__(self, value, tagSet, subtypeSpec)
-
- def clone(self, value=None, tagSet=None, subtypeSpec=None,
- encoding=None, binValue=None, hexValue=None):
- if value is None and tagSet is None and subtypeSpec is None and \
- encoding is None and binValue is None and hexValue is None:
- return self
- if value is None and binValue is None and hexValue is None:
- value = self._value
- if tagSet is None:
- tagSet = self._tagSet
- if subtypeSpec is None:
- subtypeSpec = self._subtypeSpec
- if encoding is None:
- encoding = self._encoding
- return self.__class__(
- value, tagSet, subtypeSpec, encoding, binValue, hexValue
- )
-
- if sys.version_info[0] <= 2:
- def prettyIn(self, value):
- if isinstance(value, str):
- return value
- elif isinstance(value, (tuple, list)):
- try:
- return ''.join([ chr(x) for x in value ])
- except ValueError:
- raise error.PyAsn1Error(
- 'Bad OctetString initializer \'%s\'' % (value,)
- )
- else:
- return str(value)
- else:
- def prettyIn(self, value):
- if isinstance(value, bytes):
- return value
- elif isinstance(value, OctetString):
- return value.asOctets()
- elif isinstance(value, (tuple, list, map)):
- try:
- return bytes(value)
- except ValueError:
- raise error.PyAsn1Error(
- 'Bad OctetString initializer \'%s\'' % (value,)
- )
- else:
- try:
- return str(value).encode(self._encoding)
- except UnicodeEncodeError:
- raise error.PyAsn1Error(
- 'Can\'t encode string \'%s\' with \'%s\' codec' % (value, self._encoding)
- )
-
-
- def fromBinaryString(self, value):
- bitNo = 8; byte = 0; r = ()
- for v in value:
- if bitNo:
- bitNo = bitNo - 1
- else:
- bitNo = 7
- r = r + (byte,)
- byte = 0
- if v == '0':
- v = 0
- elif v == '1':
- v = 1
- else:
- raise error.PyAsn1Error(
- 'Non-binary OCTET STRING initializer %s' % (v,)
- )
- byte = byte | (v << bitNo)
- return octets.ints2octs(r + (byte,))
-
- def fromHexString(self, value):
- r = p = ()
- for v in value:
- if p:
- r = r + (int(p+v, 16),)
- p = ()
- else:
- p = v
- if p:
- r = r + (int(p+'0', 16),)
- return octets.ints2octs(r)
-
- def prettyOut(self, value):
- if sys.version_info[0] <= 2:
- numbers = tuple([ ord(x) for x in value ])
- else:
- numbers = tuple(value)
- if [ x for x in numbers if x < 32 or x > 126 ]:
- return '0x' + ''.join([ '%.2x' % x for x in numbers ])
- else:
- return str(value)
-
- def __repr__(self):
- if self._value is base.noValue:
- return self.__class__.__name__ + '()'
- if [ x for x in self.asNumbers() if x < 32 or x > 126 ]:
- return self.__class__.__name__ + '(hexValue=\'' + ''.join([ '%.2x' % x for x in self.asNumbers() ])+'\')'
- else:
- return self.__class__.__name__ + '(\'' + self.prettyOut(self._value) + '\')'
-
- if sys.version_info[0] <= 2:
- def __str__(self): return str(self._value)
- def __unicode__(self):
- return self._value.decode(self._encoding, 'ignore')
- def asOctets(self): return self._value
- def asNumbers(self):
- if self.__intValue is None:
- self.__intValue = tuple([ ord(x) for x in self._value ])
- return self.__intValue
- else:
- def __str__(self): return self._value.decode(self._encoding, 'ignore')
- def __bytes__(self): return self._value
- def asOctets(self): return self._value
- def asNumbers(self):
- if self.__intValue is None:
- self.__intValue = tuple(self._value)
- return self.__intValue
-
- # Immutable sequence object protocol
-
- def __len__(self):
- if self._len is None:
- self._len = len(self._value)
- return self._len
- def __getitem__(self, i):
- if isinstance(i, slice):
- return self.clone(operator.getitem(self._value, i))
- else:
- return self._value[i]
-
- def __add__(self, value): return self.clone(self._value + self.prettyIn(value))
- def __radd__(self, value): return self.clone(self.prettyIn(value) + self._value)
- def __mul__(self, value): return self.clone(self._value * value)
- def __rmul__(self, value): return self * value
-
-class Null(OctetString):
- defaultValue = ''.encode() # This is tightly constrained
- tagSet = baseTagSet = tag.initTagSet(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x05)
- )
- subtypeSpec = OctetString.subtypeSpec+constraint.SingleValueConstraint(''.encode())
-
-if sys.version_info[0] <= 2:
- intTypes = (int, long)
-else:
- intTypes = int
-
-class ObjectIdentifier(base.AbstractSimpleAsn1Item):
- tagSet = baseTagSet = tag.initTagSet(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x06)
- )
- def __add__(self, other): return self.clone(self._value + other)
- def __radd__(self, other): return self.clone(other + self._value)
-
- def asTuple(self): return self._value
-
- # Sequence object protocol
-
- def __len__(self):
- if self._len is None:
- self._len = len(self._value)
- return self._len
- def __getitem__(self, i):
- if isinstance(i, slice):
- return self.clone(
- operator.getitem(self._value, i)
- )
- else:
- return self._value[i]
-
- def __str__(self): return self.prettyPrint()
-
- def index(self, suboid): return self._value.index(suboid)
-
- def isPrefixOf(self, value):
- """Returns true if argument OID resides deeper in the OID tree"""
- l = len(self)
- if l <= len(value):
- if self._value[:l] == value[:l]:
- return 1
- return 0
-
- def prettyIn(self, value):
- """Dotted -> tuple of numerics OID converter"""
- if isinstance(value, tuple):
- pass
- elif isinstance(value, ObjectIdentifier):
- return tuple(value)
- elif isinstance(value, str):
- r = []
- for element in [ x for x in value.split('.') if x != '' ]:
- try:
- r.append(int(element, 0))
- except ValueError:
- raise error.PyAsn1Error(
- 'Malformed Object ID %s at %s: %s' %
- (str(value), self.__class__.__name__, sys.exc_info()[1])
- )
- value = tuple(r)
- else:
- try:
- value = tuple(value)
- except TypeError:
- raise error.PyAsn1Error(
- 'Malformed Object ID %s at %s: %s' %
- (str(value), self.__class__.__name__,sys.exc_info()[1])
- )
-
- for x in value:
- if not isinstance(x, intTypes) or x < 0:
- raise error.PyAsn1Error(
- 'Invalid sub-ID in %s at %s' % (value, self.__class__.__name__)
- )
-
- return value
-
- def prettyOut(self, value): return '.'.join([ str(x) for x in value ])
-
-class Real(base.AbstractSimpleAsn1Item):
- try:
- _plusInf = float('inf')
- _minusInf = float('-inf')
- _inf = (_plusInf, _minusInf)
- except ValueError:
- # Infinity support is platform and Python dependent
- _plusInf = _minusInf = None
- _inf = ()
-
- tagSet = baseTagSet = tag.initTagSet(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x09)
- )
-
- def __normalizeBase10(self, value):
- m, b, e = value
- while m and m % 10 == 0:
- m = m / 10
- e = e + 1
- return m, b, e
-
- def prettyIn(self, value):
- if isinstance(value, tuple) and len(value) == 3:
- for d in value:
- if not isinstance(d, intTypes):
- raise error.PyAsn1Error(
- 'Lame Real value syntax: %s' % (value,)
- )
- if value[1] not in (2, 10):
- raise error.PyAsn1Error(
- 'Prohibited base for Real value: %s' % (value[1],)
- )
- if value[1] == 10:
- value = self.__normalizeBase10(value)
- return value
- elif isinstance(value, intTypes):
- return self.__normalizeBase10((value, 10, 0))
- elif isinstance(value, float):
- if self._inf and value in self._inf:
- return value
- else:
- e = 0
- while int(value) != value:
- value = value * 10
- e = e - 1
- return self.__normalizeBase10((int(value), 10, e))
- elif isinstance(value, Real):
- return tuple(value)
- elif isinstance(value, str): # handle infinite literal
- try:
- return float(value)
- except ValueError:
- pass
- raise error.PyAsn1Error(
- 'Bad real value syntax: %s' % (value,)
- )
-
- def prettyOut(self, value):
- if value in self._inf:
- return '\'%s\'' % value
- else:
- return str(value)
-
- def isPlusInfinity(self): return self._value == self._plusInf
- def isMinusInfinity(self): return self._value == self._minusInf
- def isInfinity(self): return self._value in self._inf
-
- def __str__(self): return str(float(self))
-
- def __add__(self, value): return self.clone(float(self) + value)
- def __radd__(self, value): return self + value
- def __mul__(self, value): return self.clone(float(self) * value)
- def __rmul__(self, value): return self * value
- def __sub__(self, value): return self.clone(float(self) - value)
- def __rsub__(self, value): return self.clone(value - float(self))
- def __mod__(self, value): return self.clone(float(self) % value)
- def __rmod__(self, value): return self.clone(value % float(self))
- def __pow__(self, value, modulo=None): return self.clone(pow(float(self), value, modulo))
- def __rpow__(self, value): return self.clone(pow(value, float(self)))
-
- if sys.version_info[0] <= 2:
- def __div__(self, value): return self.clone(float(self) / value)
- def __rdiv__(self, value): return self.clone(value / float(self))
- else:
- def __truediv__(self, value): return self.clone(float(self) / value)
- def __rtruediv__(self, value): return self.clone(value / float(self))
- def __divmod__(self, value): return self.clone(float(self) // value)
- def __rdivmod__(self, value): return self.clone(value // float(self))
-
- def __int__(self): return int(float(self))
- if sys.version_info[0] <= 2:
- def __long__(self): return long(float(self))
- def __float__(self):
- if self._value in self._inf:
- return self._value
- else:
- return float(
- self._value[0] * pow(self._value[1], self._value[2])
- )
- def __abs__(self): return abs(float(self))
-
- def __lt__(self, value): return float(self) < value
- def __le__(self, value): return float(self) <= value
- def __eq__(self, value): return float(self) == value
- def __ne__(self, value): return float(self) != value
- def __gt__(self, value): return float(self) > value
- def __ge__(self, value): return float(self) >= value
-
- if sys.version_info[0] <= 2:
- def __nonzero__(self): return bool(float(self))
- else:
- def __bool__(self): return bool(float(self))
- __hash__ = base.AbstractSimpleAsn1Item.__hash__
-
- def __getitem__(self, idx):
- if self._value in self._inf:
- raise error.PyAsn1Error('Invalid infinite value operation')
- else:
- return self._value[idx]
-
-class Enumerated(Integer):
- tagSet = baseTagSet = tag.initTagSet(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x0A)
- )
-
-# "Structured" ASN.1 types
-
-class SetOf(base.AbstractConstructedAsn1Item):
- componentType = None
- tagSet = baseTagSet = tag.initTagSet(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
- )
- typeId = 1
-
- def _cloneComponentValues(self, myClone, cloneValueFlag):
- idx = 0; l = len(self._componentValues)
- while idx < l:
- c = self._componentValues[idx]
- if c is not None:
- if isinstance(c, base.AbstractConstructedAsn1Item):
- myClone.setComponentByPosition(
- idx, c.clone(cloneValueFlag=cloneValueFlag)
- )
- else:
- myClone.setComponentByPosition(idx, c.clone())
- idx = idx + 1
-
- def _verifyComponent(self, idx, value):
- if self._componentType is not None and \
- not self._componentType.isSuperTypeOf(value):
- raise error.PyAsn1Error('Component type error %s' % (value,))
-
- def getComponentByPosition(self, idx): return self._componentValues[idx]
- def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
- l = len(self._componentValues)
- if idx >= l:
- self._componentValues = self._componentValues + (idx-l+1)*[None]
- if value is None:
- if self._componentValues[idx] is None:
- if self._componentType is None:
- raise error.PyAsn1Error('Component type not defined')
- self._componentValues[idx] = self._componentType.clone()
- self._componentValuesSet = self._componentValuesSet + 1
- return self
- elif not isinstance(value, base.Asn1Item):
- if self._componentType is None:
- raise error.PyAsn1Error('Component type not defined')
- if isinstance(self._componentType, base.AbstractSimpleAsn1Item):
- value = self._componentType.clone(value=value)
- else:
- raise error.PyAsn1Error('Instance value required')
- if verifyConstraints:
- if self._componentType is not None:
- self._verifyComponent(idx, value)
- self._verifySubtypeSpec(value, idx)
- if self._componentValues[idx] is None:
- self._componentValuesSet = self._componentValuesSet + 1
- self._componentValues[idx] = value
- return self
-
- def getComponentTagMap(self):
- if self._componentType is not None:
- return self._componentType.getTagMap()
-
- def prettyPrint(self, scope=0):
- scope = scope + 1
- r = self.__class__.__name__ + ':\n'
- for idx in range(len(self._componentValues)):
- r = r + ' '*scope
- if self._componentValues[idx] is None:
-                r = r + '<empty>'
- else:
- r = r + self._componentValues[idx].prettyPrint(scope)
- return r
-
-class SequenceOf(SetOf):
- tagSet = baseTagSet = tag.initTagSet(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
- )
- typeId = 2
-
-class SequenceAndSetBase(base.AbstractConstructedAsn1Item):
- componentType = namedtype.NamedTypes()
- def __init__(self, componentType=None, tagSet=None,
- subtypeSpec=None, sizeSpec=None):
- base.AbstractConstructedAsn1Item.__init__(
- self, componentType, tagSet, subtypeSpec, sizeSpec
- )
- if self._componentType is None:
- self._componentTypeLen = 0
- else:
- self._componentTypeLen = len(self._componentType)
-
- def __getitem__(self, idx):
- if isinstance(idx, str):
- return self.getComponentByName(idx)
- else:
- return base.AbstractConstructedAsn1Item.__getitem__(self, idx)
-
- def __setitem__(self, idx, value):
- if isinstance(idx, str):
- self.setComponentByName(idx, value)
- else:
- base.AbstractConstructedAsn1Item.__setitem__(self, idx, value)
-
- def _cloneComponentValues(self, myClone, cloneValueFlag):
- idx = 0; l = len(self._componentValues)
- while idx < l:
- c = self._componentValues[idx]
- if c is not None:
- if isinstance(c, base.AbstractConstructedAsn1Item):
- myClone.setComponentByPosition(
- idx, c.clone(cloneValueFlag=cloneValueFlag)
- )
- else:
- myClone.setComponentByPosition(idx, c.clone())
- idx = idx + 1
-
- def _verifyComponent(self, idx, value):
- if idx >= self._componentTypeLen:
- raise error.PyAsn1Error(
-                'Component position out of range'
- )
- t = self._componentType[idx].getType()
- if not t.isSuperTypeOf(value):
- raise error.PyAsn1Error('Component type error %r vs %r' % (t, value))
-
- def getComponentByName(self, name):
- return self.getComponentByPosition(
- self._componentType.getPositionByName(name)
- )
- def setComponentByName(self, name, value=None, verifyConstraints=True):
- return self.setComponentByPosition(
- self._componentType.getPositionByName(name), value,
- verifyConstraints
- )
-
- def getComponentByPosition(self, idx):
- try:
- return self._componentValues[idx]
- except IndexError:
- if idx < self._componentTypeLen:
- return
- raise
- def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
- l = len(self._componentValues)
- if idx >= l:
- self._componentValues = self._componentValues + (idx-l+1)*[None]
- if value is None:
- if self._componentValues[idx] is None:
- self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
- self._componentValuesSet = self._componentValuesSet + 1
- return self
- elif not isinstance(value, base.Asn1Item):
- t = self._componentType.getTypeByPosition(idx)
- if isinstance(t, base.AbstractSimpleAsn1Item):
- value = t.clone(value=value)
- else:
- raise error.PyAsn1Error('Instance value required')
- if verifyConstraints:
- if self._componentTypeLen:
- self._verifyComponent(idx, value)
- self._verifySubtypeSpec(value, idx)
- if self._componentValues[idx] is None:
- self._componentValuesSet = self._componentValuesSet + 1
- self._componentValues[idx] = value
- return self
-
- def getNameByPosition(self, idx):
- if self._componentTypeLen:
- return self._componentType.getNameByPosition(idx)
-
- def getDefaultComponentByPosition(self, idx):
- if self._componentTypeLen and self._componentType[idx].isDefaulted:
- return self._componentType[idx].getType()
-
- def getComponentType(self):
- if self._componentTypeLen:
- return self._componentType
-
- def setDefaultComponents(self):
- if self._componentTypeLen == self._componentValuesSet:
- return
- idx = self._componentTypeLen
- while idx:
- idx = idx - 1
- if self._componentType[idx].isDefaulted:
- if self.getComponentByPosition(idx) is None:
- self.setComponentByPosition(idx)
- elif not self._componentType[idx].isOptional:
- if self.getComponentByPosition(idx) is None:
- raise error.PyAsn1Error(
- 'Uninitialized component #%s at %r' % (idx, self)
- )
-
- def prettyPrint(self, scope=0):
- scope = scope + 1
- r = self.__class__.__name__ + ':\n'
- for idx in range(len(self._componentValues)):
- if self._componentValues[idx] is not None:
- r = r + ' '*scope
- componentType = self.getComponentType()
- if componentType is None:
-                    r = r + '<no-name>'
- else:
- r = r + componentType.getNameByPosition(idx)
- r = '%s=%s\n' % (
- r, self._componentValues[idx].prettyPrint(scope)
- )
- return r
-
-class Sequence(SequenceAndSetBase):
- tagSet = baseTagSet = tag.initTagSet(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
- )
- typeId = 3
-
- def getComponentTagMapNearPosition(self, idx):
- if self._componentType:
- return self._componentType.getTagMapNearPosition(idx)
-
- def getComponentPositionNearType(self, tagSet, idx):
- if self._componentType:
- return self._componentType.getPositionNearType(tagSet, idx)
- else:
- return idx
-
-class Set(SequenceAndSetBase):
- tagSet = baseTagSet = tag.initTagSet(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
- )
- typeId = 4
-
- def getComponent(self, innerFlag=0): return self
-
- def getComponentByType(self, tagSet, innerFlag=0):
- c = self.getComponentByPosition(
- self._componentType.getPositionByType(tagSet)
- )
- if innerFlag and isinstance(c, Set):
- # get inner component by inner tagSet
- return c.getComponent(1)
- else:
- # get outer component by inner tagSet
- return c
-
- def setComponentByType(self, tagSet, value=None, innerFlag=0,
- verifyConstraints=True):
- idx = self._componentType.getPositionByType(tagSet)
- t = self._componentType.getTypeByPosition(idx)
- if innerFlag: # set inner component by inner tagSet
- if t.getTagSet():
- return self.setComponentByPosition(
- idx, value, verifyConstraints
- )
- else:
- t = self.setComponentByPosition(idx).getComponentByPosition(idx)
- return t.setComponentByType(
- tagSet, value, innerFlag, verifyConstraints
- )
- else: # set outer component by inner tagSet
- return self.setComponentByPosition(
- idx, value, verifyConstraints
- )
-
- def getComponentTagMap(self):
- if self._componentType:
- return self._componentType.getTagMap(True)
-
- def getComponentPositionByType(self, tagSet):
- if self._componentType:
- return self._componentType.getPositionByType(tagSet)
-
-class Choice(Set):
- tagSet = baseTagSet = tag.TagSet() # untagged
- sizeSpec = constraint.ConstraintsIntersection(
- constraint.ValueSizeConstraint(1, 1)
- )
- typeId = 5
- _currentIdx = None
-
- def __eq__(self, other):
- if self._componentValues:
- return self._componentValues[self._currentIdx] == other
- return NotImplemented
- def __ne__(self, other):
- if self._componentValues:
- return self._componentValues[self._currentIdx] != other
- return NotImplemented
- def __lt__(self, other):
- if self._componentValues:
- return self._componentValues[self._currentIdx] < other
- return NotImplemented
- def __le__(self, other):
- if self._componentValues:
- return self._componentValues[self._currentIdx] <= other
- return NotImplemented
- def __gt__(self, other):
- if self._componentValues:
- return self._componentValues[self._currentIdx] > other
- return NotImplemented
- def __ge__(self, other):
- if self._componentValues:
- return self._componentValues[self._currentIdx] >= other
- return NotImplemented
- if sys.version_info[0] <= 2:
- def __nonzero__(self): return bool(self._componentValues)
- else:
- def __bool__(self): return bool(self._componentValues)
-
- def __len__(self): return self._currentIdx is not None and 1 or 0
-
- def verifySizeSpec(self):
- if self._currentIdx is None:
- raise error.PyAsn1Error('Component not chosen')
- else:
- self._sizeSpec(' ')
-
- def _cloneComponentValues(self, myClone, cloneValueFlag):
- try:
- c = self.getComponent()
- except error.PyAsn1Error:
- pass
- else:
- if isinstance(c, Choice):
- tagSet = c.getEffectiveTagSet()
- else:
- tagSet = c.getTagSet()
- if isinstance(c, base.AbstractConstructedAsn1Item):
- myClone.setComponentByType(
- tagSet, c.clone(cloneValueFlag=cloneValueFlag)
- )
- else:
- myClone.setComponentByType(tagSet, c.clone())
-
- def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
- l = len(self._componentValues)
- if idx >= l:
- self._componentValues = self._componentValues + (idx-l+1)*[None]
- if self._currentIdx is not None:
- self._componentValues[self._currentIdx] = None
- if value is None:
- if self._componentValues[idx] is None:
- self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
- self._componentValuesSet = 1
- self._currentIdx = idx
- return self
- elif not isinstance(value, base.Asn1Item):
- value = self._componentType.getTypeByPosition(idx).clone(
- value=value
- )
- if verifyConstraints:
- if self._componentTypeLen:
- self._verifyComponent(idx, value)
- self._verifySubtypeSpec(value, idx)
- self._componentValues[idx] = value
- self._currentIdx = idx
- self._componentValuesSet = 1
- return self
-
- def getMinTagSet(self):
- if self._tagSet:
- return self._tagSet
- else:
- return self._componentType.genMinTagSet()
-
- def getEffectiveTagSet(self):
- if self._tagSet:
- return self._tagSet
- else:
- c = self.getComponent()
- if isinstance(c, Choice):
- return c.getEffectiveTagSet()
- else:
- return c.getTagSet()
-
- def getTagMap(self):
- if self._tagSet:
- return Set.getTagMap(self)
- else:
- return Set.getComponentTagMap(self)
-
- def getComponent(self, innerFlag=0):
- if self._currentIdx is None:
- raise error.PyAsn1Error('Component not chosen')
- else:
- c = self._componentValues[self._currentIdx]
- if innerFlag and isinstance(c, Choice):
- return c.getComponent(innerFlag)
- else:
- return c
-
- def getName(self, innerFlag=0):
- if self._currentIdx is None:
- raise error.PyAsn1Error('Component not chosen')
- else:
- if innerFlag:
- c = self._componentValues[self._currentIdx]
- if isinstance(c, Choice):
- return c.getName(innerFlag)
- return self._componentType.getNameByPosition(self._currentIdx)
-
- def setDefaultComponents(self): pass
-
-class Any(OctetString):
- tagSet = baseTagSet = tag.TagSet() # untagged
- typeId = 6
-
- def getTagMap(self):
- return tagmap.TagMap(
- { self.getTagSet(): self },
- { eoo.endOfOctets.getTagSet(): eoo.endOfOctets },
- self
- )
-
-# XXX
-# coercion rules?
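
Editor's note: the Choice type deleted above enforces exactly one chosen alternative; setComponentByPosition clears the previously chosen component and sizeSpec pins the size to 1. A minimal sketch of that behavior, assuming a pyasn1 0.1.x-era install matching the deleted code:

```python
# Sketch only; assumes a pyasn1 0.1.x-era install matching the code above.
from pyasn1.type import univ, char, namedtype

ch = univ.Choice(componentType=namedtype.NamedTypes(
    namedtype.NamedType('num', univ.Integer()),
    namedtype.NamedType('txt', char.VisibleString()),
))

ch.setComponentByName('num', 7)
ch.setComponentByName('txt', 'hello')  # choosing 'txt' discards 'num'

assert len(ch) == 1                    # __len__: one chosen component or none
assert ch.getName() == 'txt'           # getName() reports the current choice
```
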
diff --git a/python-packages/pyasn1/type/useful.py b/python-packages/pyasn1/type/useful.py
deleted file mode 100644
index a7139c22ce..0000000000
--- a/python-packages/pyasn1/type/useful.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# ASN.1 "useful" types
-from pyasn1.type import char, tag
-
-class GeneralizedTime(char.VisibleString):
- tagSet = char.VisibleString.tagSet.tagImplicitly(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 24)
- )
-
-class UTCTime(char.VisibleString):
- tagSet = char.VisibleString.tagSet.tagImplicitly(
- tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 23)
- )
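
Editor's note: the two "useful" types are nothing more than VisibleString implicitly re-tagged with UNIVERSAL tags 24 and 23. One way to see that, assuming a matching pyasn1 install (byte literals shown Python 3 style):

```python
# Sketch only; assumes a pyasn1 install matching the deleted code.
from pyasn1.type import useful
from pyasn1.codec.der import encoder

der = encoder.encode(useful.GeneralizedTime('20120617120000Z'))
assert der[0:1] == b'\x18'   # identifier octet 0x18 == UNIVERSAL 24, primitive

der = encoder.encode(useful.UTCTime('120617120000Z'))
assert der[0:1] == b'\x17'   # identifier octet 0x17 == UNIVERSAL 23
```
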
diff --git a/python-packages/rsa/__init__.py b/python-packages/rsa/__init__.py
deleted file mode 100644
index 8fb5e00ae0..0000000000
--- a/python-packages/rsa/__init__.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2011 Sybren A. StĂĽvel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""RSA module
-
-Module for calculating large primes, and RSA encryption, decryption, signing
-and verification. Includes generating public and private keys.
-
-WARNING: this implementation does not use random padding, compression of the
-cleartext input to prevent repetitions, or other common security improvements.
-Use with care.
-
-If you want to have a more secure implementation, use the functions from the
-``rsa.pkcs1`` module.
-
-"""
-
-__author__ = "Sybren Stuvel, Barry Mead and Yesudeep Mangalapilly"
-__date__ = "2012-06-17"
-__version__ = '3.1.1'
-
-from rsa.key import newkeys, PrivateKey, PublicKey
-from rsa.pkcs1 import encrypt, decrypt, sign, verify, DecryptionError, \
- VerificationError
-
-# Do doctest if we're run directly
-if __name__ == "__main__":
- import doctest
- doctest.testmod()
-
-__all__ = ["newkeys", "encrypt", "decrypt", "sign", "verify", 'PublicKey',
- 'PrivateKey', 'DecryptionError', 'VerificationError']
-
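
Editor's note: for orientation through the rest of this diff, the package-level API deleted above round-trips as follows. A minimal sketch against the 3.1.1 API (the 512-bit key is only to keep the example fast; it is far too small for real use):

```python
import rsa

(pub_key, priv_key) = rsa.newkeys(512)

crypto = rsa.encrypt(b'hello', pub_key)            # PKCS#1 v1.5 padded
assert rsa.decrypt(crypto, priv_key) == b'hello'

signature = rsa.sign(b'hello', priv_key, 'SHA-256')
rsa.verify(b'hello', signature, pub_key)           # raises VerificationError on mismatch
```
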
diff --git a/python-packages/rsa/_compat.py b/python-packages/rsa/_compat.py
deleted file mode 100644
index 3c4eb81b13..0000000000
--- a/python-packages/rsa/_compat.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2011 Sybren A. StĂĽvel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Python compatibility wrappers."""
-
-
-from __future__ import absolute_import
-
-import sys
-from struct import pack
-
-try:
- MAX_INT = sys.maxsize
-except AttributeError:
- MAX_INT = sys.maxint
-
-MAX_INT64 = (1 << 63) - 1
-MAX_INT32 = (1 << 31) - 1
-MAX_INT16 = (1 << 15) - 1
-
-# Determine the word size of the processor.
-if MAX_INT == MAX_INT64:
- # 64-bit processor.
- MACHINE_WORD_SIZE = 64
-elif MAX_INT == MAX_INT32:
- # 32-bit processor.
- MACHINE_WORD_SIZE = 32
-else:
- # Otherwise assume a 64-bit processor, keeping up with modern times.
- MACHINE_WORD_SIZE = 64
-
-
-try:
- # < Python3
- unicode_type = unicode
- have_python3 = False
-except NameError:
- # Python3.
- unicode_type = str
- have_python3 = True
-
-# Fake byte literals.
-if str is unicode_type:
- def byte_literal(s):
- return s.encode('latin1')
-else:
- def byte_literal(s):
- return s
-
-# ``long`` is no more. Do type detection using this instead.
-try:
- integer_types = (int, long)
-except NameError:
- integer_types = (int,)
-
-b = byte_literal
-
-try:
- # Python 2.6 or higher.
- bytes_type = bytes
-except NameError:
- # Python 2.5
- bytes_type = str
-
-
-# To avoid calling b() multiple times in tight loops.
-ZERO_BYTE = b('\x00')
-EMPTY_BYTE = b('')
-
-
-def is_bytes(obj):
- """
- Determines whether the given value is a byte string.
-
- :param obj:
- The value to test.
- :returns:
- ``True`` if ``value`` is a byte string; ``False`` otherwise.
- """
- return isinstance(obj, bytes_type)
-
-
-def is_integer(obj):
- """
- Determines whether the given value is an integer.
-
- :param obj:
- The value to test.
- :returns:
- ``True`` if ``value`` is an integer; ``False`` otherwise.
- """
- return isinstance(obj, integer_types)
-
-
-def byte(num):
- """
- Converts a number between 0 and 255 (both inclusive) to a base-256 (byte)
- representation.
-
- Use it as a replacement for ``chr`` where you are expecting a byte
- because this will work on all current versions of Python.
-
- :param num:
- An unsigned integer between 0 and 255 (both inclusive).
- :returns:
- A single byte.
- """
- return pack("B", num)
-
-
-def get_word_alignment(num, force_arch=64,
- _machine_word_size=MACHINE_WORD_SIZE):
- """
- Returns alignment details for the given number based on the platform
- Python is running on.
-
- :param num:
- Unsigned integral number.
- :param force_arch:
- If you don't want to use 64-bit unsigned chunks, set this to
- anything other than 64. 32-bit chunks will be preferred then.
- Default 64 will be used when on a 64-bit machine.
- :param _machine_word_size:
- (Internal) The machine word size used for alignment.
- :returns:
- 4-tuple::
-
- (word_bits, word_bytes,
- max_uint, packing_format_type)
- """
- max_uint64 = 0xffffffffffffffff
- max_uint32 = 0xffffffff
- max_uint16 = 0xffff
- max_uint8 = 0xff
-
- if force_arch == 64 and _machine_word_size >= 64 and num > max_uint32:
- # 64-bit unsigned integer.
- return 64, 8, max_uint64, "Q"
- elif num > max_uint16:
- # 32-bit unsigned integer
- return 32, 4, max_uint32, "L"
- elif num > max_uint8:
- # 16-bit unsigned integer.
- return 16, 2, max_uint16, "H"
- else:
- # 8-bit unsigned integer.
- return 8, 1, max_uint8, "B"
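
Editor's note: the word-alignment helper above just maps a number's magnitude to the widest chunk size worth packing. A short usage sketch, assuming the module above is importable (the second assert holds on a 64-bit machine):

```python
from rsa._compat import byte, get_word_alignment

assert byte(65) == b'A'   # pack('B', 65)

# Small numbers pack into single bytes, big ones into 64-bit words.
assert get_word_alignment(0x12) == (8, 1, 0xff, 'B')
assert get_word_alignment(0x123456789) == (64, 8, 0xffffffffffffffff, 'Q')
```
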
diff --git a/python-packages/rsa/_version133.py b/python-packages/rsa/_version133.py
deleted file mode 100644
index 230a03c84b..0000000000
--- a/python-packages/rsa/_version133.py
+++ /dev/null
@@ -1,442 +0,0 @@
-"""RSA module
-pri = k[1] //Private part of keys d,p,q
-
-Module for calculating large primes, and RSA encryption, decryption,
-signing and verification. Includes generating public and private keys.
-
-WARNING: this code implements the mathematics of RSA. It is not suitable for
-real-world secure cryptography purposes. It has not been reviewed by a security
-expert. It does not include padding of data. There are many ways in which the
-output of this module, when used without any modification, can be successfully
-attacked.
-"""
-
-__author__ = "Sybren Stuvel, Marloes de Boer and Ivo Tamboer"
-__date__ = "2010-02-05"
-__version__ = '1.3.3'
-
-# NOTE: Python's modulo can return negative numbers. We compensate for
-# this behaviour using the abs() function
-
-from cPickle import dumps, loads
-import base64
-import math
-import os
-import random
-import sys
-import types
-import zlib
-
-from rsa._compat import byte
-
-# Display a warning that this insecure version is imported.
-import warnings
-warnings.warn('Insecure version of the RSA module is imported as %s, be careful'
- % __name__)
-
-def gcd(p, q):
- """Returns the greatest common divisor of p and q
-
-
- >>> gcd(42, 6)
- 6
- """
- if p<q: return gcd(q, p)
- if q == 0: return p
- return gcd(q, abs(p%q))
-
-def bytes2int(bytes):
- """Converts a list of bytes or a string to an integer
-
- >>> (128*256 + 64)*256 + 15
- 8405007
- >>> l = [128, 64, 15]
- >>> bytes2int(l)
- 8405007
- """
-
- if not (type(bytes) is types.ListType or type(bytes) is types.StringType):
- raise TypeError("You must pass a string or a list")
-
- # Convert byte stream to integer
- integer = 0
- for byte in bytes:
- integer *= 256
- if type(byte) is types.StringType: byte = ord(byte)
- integer += byte
-
- return integer
-
-def int2bytes(number):
- """Converts a number to a string of bytes
-
- >>> bytes2int(int2bytes(123456789))
- 123456789
- """
-
- if not (type(number) is types.LongType or type(number) is types.IntType):
- raise TypeError("You must pass a long or an int")
-
- string = ""
-
- while number > 0:
- string = "%s%s" % (byte(number & 0xFF), string)
- number /= 256
-
- return string
-
-def fast_exponentiation(a, p, n):
- """Calculates r = a^p mod n
- """
- result = a % n
- remainders = []
- while p != 1:
- remainders.append(p & 1)
- p = p >> 1
- while remainders:
- rem = remainders.pop()
- result = ((a ** rem) * result ** 2) % n
- return result
-
-def read_random_int(nbits):
- """Reads a random integer of approximately nbits bits rounded up
- to whole bytes"""
-
- nbytes = ceil(nbits/8.)
- randomdata = os.urandom(nbytes)
- return bytes2int(randomdata)
-
-def ceil(x):
- """ceil(x) -> int(math.ceil(x))"""
-
- return int(math.ceil(x))
-
-def randint(minvalue, maxvalue):
- """Returns a random integer x with minvalue <= x <= maxvalue"""
-
- # Safety - get a lot of random data even if the range is fairly
- # small
- min_nbits = 32
-
- # The range of the random numbers we need to generate
- range = maxvalue - minvalue
-
- # Which is this number of bytes
- rangebytes = ceil(math.log(range, 2) / 8.)
-
- # Convert to bits, but make sure it's always at least min_nbits*2
- rangebits = max(rangebytes * 8, min_nbits * 2)
-
- # Take a random number of bits between min_nbits and rangebits
- nbits = random.randint(min_nbits, rangebits)
-
- return (read_random_int(nbits) % range) + minvalue
-
-def fermat_little_theorem(p):
- """Returns 1 if p may be prime, and something else if p definitely
- is not prime"""
-
- a = randint(1, p-1)
- return fast_exponentiation(a, p-1, p)
-
-def jacobi(a, b):
- """Calculates the value of the Jacobi symbol (a/b)
- """
-
- if a % b == 0:
- return 0
- result = 1
- while a > 1:
- if a & 1:
- if ((a-1)*(b-1) >> 2) & 1:
- result = -result
- b, a = a, b % a
- else:
- if ((b ** 2 - 1) >> 3) & 1:
- result = -result
- a = a >> 1
- return result
-
-def jacobi_witness(x, n):
- """Returns False if n is an Euler pseudo-prime with base x, and
- True otherwise.
- """
-
- j = jacobi(x, n) % n
- f = fast_exponentiation(x, (n-1)/2, n)
-
- if j == f: return False
- return True
-
-def randomized_primality_testing(n, k):
- """Calculates whether n is composite (which is always correct) or
- prime (which is incorrect with error probability 2**-k)
-
- Returns False if the number is composite, and True if it's
- probably prime.
- """
-
- q = 0.5 # Property of the jacobi_witness function
-
- # t = int(math.ceil(k / math.log(1/q, 2)))
- t = ceil(k / math.log(1/q, 2))
- for i in range(t+1):
- x = randint(1, n-1)
- if jacobi_witness(x, n): return False
-
- return True
-
-def is_prime(number):
- """Returns True if the number is prime, and False otherwise.
-
- >>> is_prime(42)
- 0
- >>> is_prime(41)
- 1
- """
-
- """
- if not fermat_little_theorem(number) == 1:
- # Not prime, according to Fermat's little theorem
- return False
- """
-
- if randomized_primality_testing(number, 5):
- # Prime, according to Jacobi
- return True
-
- # Not prime
- return False
-
-
-def getprime(nbits):
- """Returns a prime number of max. 'math.ceil(nbits/8)*8' bits. In
- other words: nbits is rounded up to whole bytes.
-
- >>> p = getprime(8)
- >>> is_prime(p-1)
- 0
- >>> is_prime(p)
- 1
- >>> is_prime(p+1)
- 0
- """
-
- nbytes = int(math.ceil(nbits/8.))
-
- while True:
- integer = read_random_int(nbits)
-
- # Make sure it's odd
- integer |= 1
-
- # Test for primeness
- if is_prime(integer): break
-
- # Retry if not prime
-
- return integer
-
-def are_relatively_prime(a, b):
- """Returns True if a and b are relatively prime, and False if they
- are not.
-
- >>> are_relatively_prime(2, 3)
- 1
- >>> are_relatively_prime(2, 4)
- 0
- """
-
- d = gcd(a, b)
- return (d == 1)
-
-def find_p_q(nbits):
- """Returns a tuple of two different primes of nbits bits"""
-
- p = getprime(nbits)
- while True:
- q = getprime(nbits)
- if not q == p: break
-
- return (p, q)
-
-def extended_euclid_gcd(a, b):
- """Returns a tuple (d, i, j) such that d = gcd(a, b) = ia + jb
- """
-
- if b == 0:
- return (a, 1, 0)
-
- q = abs(a % b)
- r = long(a / b)
- (d, k, l) = extended_euclid_gcd(b, q)
-
- return (d, l, k - l*r)
-
-# Main function: calculate encryption and decryption keys
-def calculate_keys(p, q, nbits):
- """Calculates an encryption and a decryption key for p and q, and
- returns them as a tuple (e, d)"""
-
- n = p * q
- phi_n = (p-1) * (q-1)
-
- while True:
- # Make sure e has enough bits so we ensure "wrapping" through
- # modulo n
- e = getprime(max(8, nbits/2))
- if are_relatively_prime(e, n) and are_relatively_prime(e, phi_n): break
-
- (d, i, j) = extended_euclid_gcd(e, phi_n)
-
- if not d == 1:
- raise Exception("e (%d) and phi_n (%d) are not relatively prime" % (e, phi_n))
-
- if not (e * i) % phi_n == 1:
- raise Exception("e (%d) and i (%d) are not mult. inv. modulo phi_n (%d)" % (e, i, phi_n))
-
- return (e, i)
-
-
-def gen_keys(nbits):
- """Generate RSA keys of nbits bits. Returns (p, q, e, d).
-
- Note: this can take a long time, depending on the key size.
- """
-
- while True:
- (p, q) = find_p_q(nbits)
- (e, d) = calculate_keys(p, q, nbits)
-
- # For some reason, d is sometimes negative. We don't know how
- # to fix it (yet), so we keep trying until everything is shiny
- if d > 0: break
-
- return (p, q, e, d)
-
-def gen_pubpriv_keys(nbits):
- """Generates public and private keys, and returns them as (pub,
- priv).
-
- The public key consists of a dict {e: ..., n: ...}. The private
- key consists of a dict {d: ..., p: ..., q: ...}.
- """
-
- (p, q, e, d) = gen_keys(nbits)
-
- return ( {'e': e, 'n': p*q}, {'d': d, 'p': p, 'q': q} )
-
-def encrypt_int(message, ekey, n):
- """Encrypts a message using encryption key 'ekey', working modulo
- n"""
-
- if type(message) is types.IntType:
- return encrypt_int(long(message), ekey, n)
-
- if not type(message) is types.LongType:
- raise TypeError("You must pass a long or an int")
-
- if message > 0 and \
- math.floor(math.log(message, 2)) > math.floor(math.log(n, 2)):
- raise OverflowError("The message is too long")
-
- return fast_exponentiation(message, ekey, n)
-
-def decrypt_int(cyphertext, dkey, n):
- """Decrypts a cypher text using the decryption key 'dkey', working
- modulo n"""
-
- return encrypt_int(cyphertext, dkey, n)
-
-def sign_int(message, dkey, n):
- """Signs 'message' using key 'dkey', working modulo n"""
-
- return decrypt_int(message, dkey, n)
-
-def verify_int(signed, ekey, n):
- """verifies 'signed' using key 'ekey', working modulo n"""
-
- return encrypt_int(signed, ekey, n)
-
-def picklechops(chops):
- """Pickles and base64encodes it's argument chops"""
-
- value = zlib.compress(dumps(chops))
- encoded = base64.encodestring(value)
- return encoded.strip()
-
-def unpicklechops(string):
- """base64decodes and unpickes it's argument string into chops"""
-
- return loads(zlib.decompress(base64.decodestring(string)))
-
-def chopstring(message, key, n, funcref):
- """Splits 'message' into chops that are at most as long as n,
- converts these into integers, and calls funcref(integer, key, n)
- for each chop.
-
- Used by 'encrypt' and 'sign'.
- """
-
- msglen = len(message)
- mbits = msglen * 8
- nbits = int(math.floor(math.log(n, 2)))
- nbytes = nbits / 8
- blocks = msglen / nbytes
-
- if msglen % nbytes > 0:
- blocks += 1
-
- cypher = []
-
- for bindex in range(blocks):
- offset = bindex * nbytes
- block = message[offset:offset+nbytes]
- value = bytes2int(block)
- cypher.append(funcref(value, key, n))
-
- return picklechops(cypher)
-
-def gluechops(chops, key, n, funcref):
- """Glues chops back together into a string. calls
- funcref(integer, key, n) for each chop.
-
- Used by 'decrypt' and 'verify'.
- """
- message = ""
-
- chops = unpicklechops(chops)
-
- for cpart in chops:
- mpart = funcref(cpart, key, n)
- message += int2bytes(mpart)
-
- return message
-
-def encrypt(message, key):
- """Encrypts a string 'message' with the public key 'key'"""
-
- return chopstring(message, key['e'], key['n'], encrypt_int)
-
-def sign(message, key):
- """Signs a string 'message' with the private key 'key'"""
-
- return chopstring(message, key['d'], key['p']*key['q'], decrypt_int)
-
-def decrypt(cypher, key):
- """Decrypts a cypher with the private key 'key'"""
-
- return gluechops(cypher, key['d'], key['p']*key['q'], decrypt_int)
-
-def verify(cypher, key):
- """Verifies a cypher with the public key 'key'"""
-
- return gluechops(cypher, key['e'], key['n'], encrypt_int)
-
-# Do doctest if we're not imported
-if __name__ == "__main__":
- import doctest
- doctest.testmod()
-
-__all__ = ["gen_pubpriv_keys", "encrypt", "decrypt", "sign", "verify"]
-
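
Editor's note: fast_exponentiation above is plain left-to-right square-and-multiply. A self-contained Python 3 restatement, checked against the built-in three-argument pow (a sketch, not part of the library; like the original it assumes the exponent is at least 1):

```python
def modexp(a, p, n):
    """Left-to-right square-and-multiply, mirroring fast_exponentiation."""
    result = a % n                 # top bit of p is handled implicitly
    remainders = []
    while p != 1:                  # collect the remaining bits, LSB first
        remainders.append(p & 1)
        p >>= 1
    while remainders:              # replay them MSB first
        bit = remainders.pop()
        result = ((a ** bit) * result ** 2) % n
    return result

assert modexp(7, 560, 561) == pow(7, 560, 561) == 1   # 561 is a Carmichael number
```
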
diff --git a/python-packages/rsa/_version200.py b/python-packages/rsa/_version200.py
deleted file mode 100644
index f915653857..0000000000
--- a/python-packages/rsa/_version200.py
+++ /dev/null
@@ -1,529 +0,0 @@
-"""RSA module
-
-Module for calculating large primes, and RSA encryption, decryption,
-signing and verification. Includes generating public and private keys.
-
-WARNING: this implementation does not use random padding, compression of the
-cleartext input to prevent repetitions, or other common security improvements.
-Use with care.
-
-"""
-
-__author__ = "Sybren Stuvel, Marloes de Boer, Ivo Tamboer, and Barry Mead"
-__date__ = "2010-02-08"
-__version__ = '2.0'
-
-import math
-import os
-import random
-import sys
-import types
-from rsa._compat import byte
-
-# Display a warning that this insecure version is imported.
-import warnings
-warnings.warn('Insecure version of the RSA module is imported as %s' % __name__)
-
-
-def bit_size(number):
- """Returns the number of bits required to hold a specific long number"""
-
- return int(math.ceil(math.log(number,2)))
-
-def gcd(p, q):
- """Returns the greatest common divisor of p and q
- >>> gcd(48, 180)
- 12
- """
- # Iterative version is faster and uses much less stack space
- while q != 0:
- if p < q: (p,q) = (q,p)
- (p,q) = (q, p % q)
- return p
-
-
-def bytes2int(bytes):
- """Converts a list of bytes or a string to an integer
-
- >>> (((128 * 256) + 64) * 256) + 15
- 8405007
- >>> l = [128, 64, 15]
- >>> bytes2int(l) #same as bytes2int('\x80@\x0f')
- 8405007
- """
-
- if not (type(bytes) is types.ListType or type(bytes) is types.StringType):
- raise TypeError("You must pass a string or a list")
-
- # Convert byte stream to integer
- integer = 0
- for byte in bytes:
- integer *= 256
- if type(byte) is types.StringType: byte = ord(byte)
- integer += byte
-
- return integer
-
-def int2bytes(number):
- """
- Converts a number to a string of bytes
- """
-
- if not (type(number) is types.LongType or type(number) is types.IntType):
- raise TypeError("You must pass a long or an int")
-
- string = ""
-
- while number > 0:
- string = "%s%s" % (byte(number & 0xFF), string)
- number /= 256
-
- return string
-
-def to64(number):
- """Converts a number in the range of 0 to 63 into base 64 digit
- character in the range of '0'-'9', 'A'-'Z', 'a'-'z','-','_'.
-
- >>> to64(10)
- 'A'
- """
-
- if not (type(number) is types.LongType or type(number) is types.IntType):
- raise TypeError("You must pass a long or an int")
-
- if 0 <= number <= 9: #00-09 translates to '0' - '9'
- return byte(number + 48)
-
- if 10 <= number <= 35:
- return byte(number + 55) #10-35 translates to 'A' - 'Z'
-
- if 36 <= number <= 61:
- return byte(number + 61) #36-61 translates to 'a' - 'z'
-
- if number == 62: # 62 translates to '-' (minus)
- return byte(45)
-
- if number == 63: # 63 translates to '_' (underscore)
- return byte(95)
-
- raise ValueError('Invalid Base64 value: %i' % number)
-
-
-def from64(number):
- """Converts an ordinal character value in the range of
- 0-9,A-Z,a-z,-,_ to a number in the range of 0-63.
-
- >>> from64(49)
- 1
- """
-
- if not (type(number) is types.LongType or type(number) is types.IntType):
- raise TypeError("You must pass a long or an int")
-
- if 48 <= number <= 57: #ord('0') - ord('9') translates to 0-9
- return(number - 48)
-
- if 65 <= number <= 90: #ord('A') - ord('Z') translates to 10-35
- return(number - 55)
-
- if 97 <= number <= 122: #ord('a') - ord('z') translates to 36-61
- return(number - 61)
-
- if number == 45: #ord('-') translates to 62
- return(62)
-
- if number == 95: #ord('_') translates to 63
- return(63)
-
- raise ValueError('Invalid Base64 value: %i' % number)
-
-
-def int2str64(number):
- """Converts a number to a string of base64 encoded characters in
- the range of '0'-'9','A'-'Z','a'-'z','-','_'.
-
- >>> int2str64(123456789)
- '7MyqL'
- """
-
- if not (type(number) is types.LongType or type(number) is types.IntType):
- raise TypeError("You must pass a long or an int")
-
- string = ""
-
- while number > 0:
- string = "%s%s" % (to64(number & 0x3F), string)
- number /= 64
-
- return string
-
-
-def str642int(string):
- """Converts a base64 encoded string into an integer.
- The chars of this string are in the range '0'-'9','A'-'Z','a'-'z','-','_'
-
- >>> str642int('7MyqL')
- 123456789
- """
-
- if not (type(string) is types.ListType or type(string) is types.StringType):
- raise TypeError("You must pass a string or a list")
-
- integer = 0
- for byte in string:
- integer *= 64
- if type(byte) is types.StringType: byte = ord(byte)
- integer += from64(byte)
-
- return integer
-
-def read_random_int(nbits):
- """Reads a random integer of approximately nbits bits rounded up
- to whole bytes"""
-
- nbytes = int(math.ceil(nbits/8.))
- randomdata = os.urandom(nbytes)
- return bytes2int(randomdata)
-
-def randint(minvalue, maxvalue):
- """Returns a random integer x with minvalue <= x <= maxvalue"""
-
- # Safety - get a lot of random data even if the range is fairly
- # small
- min_nbits = 32
-
- # The range of the random numbers we need to generate
- range = (maxvalue - minvalue) + 1
-
- # Which is this number of bytes
- rangebytes = ((bit_size(range) + 7) / 8)
-
- # Convert to bits, but make sure it's always at least min_nbits*2
- rangebits = max(rangebytes * 8, min_nbits * 2)
-
- # Take a random number of bits between min_nbits and rangebits
- nbits = random.randint(min_nbits, rangebits)
-
- return (read_random_int(nbits) % range) + minvalue
-
-def jacobi(a, b):
- """Calculates the value of the Jacobi symbol (a/b)
- where both a and b are positive integers, and b is odd
- """
-
- if a == 0: return 0
- result = 1
- while a > 1:
- if a & 1:
- if ((a-1)*(b-1) >> 2) & 1:
- result = -result
- a, b = b % a, a
- else:
- if (((b * b) - 1) >> 3) & 1:
- result = -result
- a >>= 1
- if a == 0: return 0
- return result
-
-def jacobi_witness(x, n):
- """Returns False if n is an Euler pseudo-prime with base x, and
- True otherwise.
- """
-
- j = jacobi(x, n) % n
- f = pow(x, (n-1)/2, n)
-
- if j == f: return False
- return True
-
-def randomized_primality_testing(n, k):
- """Calculates whether n is composite (which is always correct) or
- prime (which is incorrect with error probability 2**-k)
-
- Returns False if the number is composite, and True if it's
- probably prime.
- """
-
- # 50% of Jacobi-witnesses can report compositeness of non-prime numbers
-
- for i in range(k):
- x = randint(1, n-1)
- if jacobi_witness(x, n): return False
-
- return True
-
-def is_prime(number):
- """Returns True if the number is prime, and False otherwise.
-
- >>> is_prime(42)
- 0
- >>> is_prime(41)
- 1
- """
-
- if randomized_primality_testing(number, 6):
- # Prime, according to Jacobi
- return True
-
- # Not prime
- return False
-
-
-def getprime(nbits):
- """Returns a prime number of max. 'math.ceil(nbits/8)*8' bits. In
- other words: nbits is rounded up to whole bytes.
-
- >>> p = getprime(8)
- >>> is_prime(p-1)
- 0
- >>> is_prime(p)
- 1
- >>> is_prime(p+1)
- 0
- """
-
- while True:
- integer = read_random_int(nbits)
-
- # Make sure it's odd
- integer |= 1
-
- # Test for primeness
- if is_prime(integer): break
-
- # Retry if not prime
-
- return integer
-
-def are_relatively_prime(a, b):
- """Returns True if a and b are relatively prime, and False if they
- are not.
-
- >>> are_relatively_prime(2, 3)
- 1
- >>> are_relatively_prime(2, 4)
- 0
- """
-
- d = gcd(a, b)
- return (d == 1)
-
-def find_p_q(nbits):
- """Returns a tuple of two different primes of nbits bits"""
- pbits = nbits + (nbits/16) #Make sure that p and q aren't too close
- qbits = nbits - (nbits/16) #or the factoring programs can factor n
- p = getprime(pbits)
- while True:
- q = getprime(qbits)
- #Make sure p and q are different.
- if not q == p: break
- return (p, q)
-
-def extended_gcd(a, b):
- """Returns a tuple (r, i, j) such that r = gcd(a, b) = ia + jb
- """
- # r = gcd(a,b) i = multiplicative inverse of a mod b
- # or j = multiplicative inverse of b mod a
- # Neg return values for i or j are made positive mod b or a respectively
- # Iterative version is faster and uses much less stack space
- x = 0
- y = 1
- lx = 1
- ly = 0
- oa = a #Remember original a/b to remove
- ob = b #negative values from return results
- while b != 0:
- q = long(a/b)
- (a, b) = (b, a % b)
- (x, lx) = ((lx - (q * x)),x)
- (y, ly) = ((ly - (q * y)),y)
- if (lx < 0): lx += ob #If neg wrap modulo original b
- if (ly < 0): ly += oa #If neg wrap modulo original a
- return (a, lx, ly) #Return only positive values
-
-# Main function: calculate encryption and decryption keys
-def calculate_keys(p, q, nbits):
- """Calculates an encryption and a decryption key for p and q, and
- returns them as a tuple (e, d)"""
-
- n = p * q
- phi_n = (p-1) * (q-1)
-
- while True:
- # Make sure e has enough bits so we ensure "wrapping" through
- # modulo n
- e = max(65537,getprime(nbits/4))
- if are_relatively_prime(e, n) and are_relatively_prime(e, phi_n): break
-
- (d, i, j) = extended_gcd(e, phi_n)
-
- if not d == 1:
- raise Exception("e (%d) and phi_n (%d) are not relatively prime" % (e, phi_n))
- if (i < 0):
- raise Exception("New extended_gcd shouldn't return negative values")
- if not (e * i) % phi_n == 1:
- raise Exception("e (%d) and i (%d) are not mult. inv. modulo phi_n (%d)" % (e, i, phi_n))
-
- return (e, i)
-
-
-def gen_keys(nbits):
- """Generate RSA keys of nbits bits. Returns (p, q, e, d).
-
- Note: this can take a long time, depending on the key size.
- """
-
- (p, q) = find_p_q(nbits)
- (e, d) = calculate_keys(p, q, nbits)
-
- return (p, q, e, d)
-
-def newkeys(nbits):
- """Generates public and private keys, and returns them as (pub,
- priv).
-
- The public key consists of a dict {e: ..., n: ...}. The private
- key consists of a dict {d: ..., p: ..., q: ...}.
- """
- nbits = max(9,nbits) # Don't let nbits go below 9 bits
- (p, q, e, d) = gen_keys(nbits)
-
- return ( {'e': e, 'n': p*q}, {'d': d, 'p': p, 'q': q} )
-
-def encrypt_int(message, ekey, n):
- """Encrypts a message using encryption key 'ekey', working modulo n"""
-
- if type(message) is types.IntType:
- message = long(message)
-
- if not type(message) is types.LongType:
- raise TypeError("You must pass a long or int")
-
- if message < 0 or message > n:
- raise OverflowError("The message is too long")
-
- #Note: Bit exponents start at zero (bit counts start at 1); this is correct
- safebit = bit_size(n) - 2 #compute safe bit (MSB - 1)
- message += (1 << safebit) #add safebit to ensure folding
-
- return pow(message, ekey, n)
-
-def decrypt_int(cyphertext, dkey, n):
- """Decrypts a cypher text using the decryption key 'dkey', working
- modulo n"""
-
- message = pow(cyphertext, dkey, n)
-
- safebit = bit_size(n) - 2 #compute safe bit (MSB - 1)
- message -= (1 << safebit) #remove safebit before decode
-
- return message
-
-def encode64chops(chops):
- """base64encodes chops and combines them into a ',' delimited string"""
-
- chips = [] #chips are character chops
-
- for value in chops:
- chips.append(int2str64(value))
-
- #delimit chops with comma
- encoded = ','.join(chips)
-
- return encoded
-
-def decode64chops(string):
- """base64decodes and makes a ',' delimited string into chops"""
-
- chips = string.split(',') #split chops at commas
-
- chops = []
-
- for string in chips: #make char chops (chips) into chops
- chops.append(str642int(string))
-
- return chops
-
-def chopstring(message, key, n, funcref):
- """Chops the 'message' into integers that fit into n,
- leaving room for a safebit to be added to ensure that all
- messages fold during exponentiation. The MSB of the number n
- is not independent modulo n (setting it could cause overflow), so
- use the next lower bit for the safebit. Therefore reserve 2 bits
- in the number n for non-data bits. Calls specified encryption
- function for each chop.
-
- Used by 'encrypt' and 'sign'.
- """
-
- msglen = len(message)
- mbits = msglen * 8
- #Set aside 2 bits so setting the safebit won't overflow modulo n.
- nbits = bit_size(n) - 2 # leave room for safebit
- nbytes = nbits / 8
- blocks = msglen / nbytes
-
- if msglen % nbytes > 0:
- blocks += 1
-
- cypher = []
-
- for bindex in range(blocks):
- offset = bindex * nbytes
- block = message[offset:offset+nbytes]
- value = bytes2int(block)
- cypher.append(funcref(value, key, n))
-
- return encode64chops(cypher) #Encode encrypted ints to base64 strings
-
-def gluechops(string, key, n, funcref):
- """Glues chops back together into a string. calls
- funcref(integer, key, n) for each chop.
-
- Used by 'decrypt' and 'verify'.
- """
- message = ""
-
- chops = decode64chops(string) #Decode base64 strings into integer chops
-
- for cpart in chops:
- mpart = funcref(cpart, key, n) #Decrypt each chop
- message += int2bytes(mpart) #Combine decrypted strings into a msg
-
- return message
-
-def encrypt(message, key):
- """Encrypts a string 'message' with the public key 'key'"""
- if 'n' not in key:
- raise Exception("You must use the public key with encrypt")
-
- return chopstring(message, key['e'], key['n'], encrypt_int)
-
-def sign(message, key):
- """Signs a string 'message' with the private key 'key'"""
- if 'p' not in key:
- raise Exception("You must use the private key with sign")
-
- return chopstring(message, key['d'], key['p']*key['q'], encrypt_int)
-
-def decrypt(cypher, key):
- """Decrypts a string 'cypher' with the private key 'key'"""
- if 'p' not in key:
- raise Exception("You must use the private key with decrypt")
-
- return gluechops(cypher, key['d'], key['p']*key['q'], decrypt_int)
-
-def verify(cypher, key):
- """Verifies a string 'cypher' with the public key 'key'"""
- if 'n' not in key:
- raise Exception("You must use the public key with verify")
-
- return gluechops(cypher, key['e'], key['n'], decrypt_int)
-
-# Do doctest if we're not imported
-if __name__ == "__main__":
- import doctest
- doctest.testmod()
-
-__all__ = ["newkeys", "encrypt", "decrypt", "sign", "verify"]
-
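
Editor's note: the "safebit" folding that encrypt_int/decrypt_int perform above is easy to check by hand with a textbook key. A worked sketch (the toy numbers are illustrative only):

```python
# Textbook RSA toy key: p = 61, q = 53, so n = 3233, e = 17, d = 2753.
n, e, d = 3233, 17, 2753

safebit = n.bit_length() - 2       # bit_size(n) - 2, i.e. MSB - 1
m = 42
c = pow(m + (1 << safebit), e, n)  # fold: set the safebit before encrypting
assert pow(c, d, n) - (1 << safebit) == m   # unfold after decrypting
```
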
diff --git a/python-packages/rsa/bigfile.py b/python-packages/rsa/bigfile.py
deleted file mode 100644
index 516cf56b51..0000000000
--- a/python-packages/rsa/bigfile.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2011 Sybren A. StĂĽvel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''Large file support
-
- - break a file into smaller blocks, and encrypt them, and store the
- encrypted blocks in another file.
-
- - take such an encrypted file, decrypt its blocks, and reconstruct the
- original file.
-
-The encrypted file format is as follows, where || denotes byte concatenation:
-
- FILE := VERSION || BLOCK || BLOCK ...
-
- BLOCK := LENGTH || DATA
-
- LENGTH := varint-encoded length of the subsequent data. Varint comes from
- Google Protobuf, and encodes an integer into a variable number of bytes.
- Each byte uses the 7 lowest bits to encode the value. The highest bit set
- to 1 indicates the next byte is also part of the varint. The last byte will
- have this bit set to 0.
-
-This file format is called the VARBLOCK format, in line with the varint format
-used to denote the block sizes.
-
-'''
-
-from rsa import key, common, pkcs1, varblock
-from rsa._compat import byte
-
-def encrypt_bigfile(infile, outfile, pub_key):
- '''Encrypts a file, writing it to 'outfile' in VARBLOCK format.
-
- :param infile: file-like object to read the cleartext from
- :param outfile: file-like object to write the crypto in VARBLOCK format to
- :param pub_key: :py:class:`rsa.PublicKey` to encrypt with
-
- '''
-
- if not isinstance(pub_key, key.PublicKey):
- raise TypeError('Public key required, but got %r' % pub_key)
-
- key_bytes = common.bit_size(pub_key.n) // 8
- blocksize = key_bytes - 11 # keep space for PKCS#1 padding
-
- # Write the version number to the VARBLOCK file
- outfile.write(byte(varblock.VARBLOCK_VERSION))
-
- # Encrypt and write each block
- for block in varblock.yield_fixedblocks(infile, blocksize):
- crypto = pkcs1.encrypt(block, pub_key)
-
- varblock.write_varint(outfile, len(crypto))
- outfile.write(crypto)
-
-def decrypt_bigfile(infile, outfile, priv_key):
- '''Decrypts an encrypted VARBLOCK file, writing it to 'outfile'
-
- :param infile: file-like object to read the crypto in VARBLOCK format from
- :param outfile: file-like object to write the cleartext to
- :param priv_key: :py:class:`rsa.PrivateKey` to decrypt with
-
- '''
-
- if not isinstance(priv_key, key.PrivateKey):
- raise TypeError('Private key required, but got %r' % priv_key)
-
- for block in varblock.yield_varblocks(infile):
- cleartext = pkcs1.decrypt(block, priv_key)
- outfile.write(cleartext)
-
-__all__ = ['encrypt_bigfile', 'decrypt_bigfile']
-
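
Editor's note: the VARBLOCK LENGTH field uses protobuf-style little-endian varints, 7 payload bits per byte with the high bit flagging continuation. A minimal encoder/decoder sketch matching that description (the real implementation lives in rsa.varblock, which is not shown in this diff):

```python
import io

def write_varint(outfile, value):
    """Protobuf-style varint: 7 bits per byte, MSB flags continuation."""
    while True:
        chunk = value & 0x7f
        value >>= 7
        if value:
            outfile.write(bytes([chunk | 0x80]))   # more bytes follow
        else:
            outfile.write(bytes([chunk]))          # last byte: high bit clear
            return

def read_varint(infile):
    result = shift = 0
    while True:
        byte = infile.read(1)[0]
        result |= (byte & 0x7f) << shift
        if not byte & 0x80:
            return result
        shift += 7

buf = io.BytesIO()
write_varint(buf, 300)             # encodes as b'\xac\x02'
buf.seek(0)
assert read_varint(buf) == 300
```
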
diff --git a/python-packages/rsa/cli.py b/python-packages/rsa/cli.py
deleted file mode 100644
index 2441955aa2..0000000000
--- a/python-packages/rsa/cli.py
+++ /dev/null
@@ -1,379 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2011 Sybren A. StĂĽvel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''Commandline scripts.
-
-These scripts are called by the executables defined in setup.py.
-'''
-
-from __future__ import with_statement, print_function
-
-import abc
-import sys
-from optparse import OptionParser
-
-import rsa
-import rsa.bigfile
-import rsa.pkcs1
-
-HASH_METHODS = sorted(rsa.pkcs1.HASH_METHODS.keys())
-
-def keygen():
- '''Key generator.'''
-
- # Parse the CLI options
- parser = OptionParser(usage='usage: %prog [options] keysize',
- description='Generates a new RSA keypair of "keysize" bits.')
-
- parser.add_option('--pubout', type='string',
- help='Output filename for the public key. The public key is '
- 'not saved if this option is not present. You can use '
- 'pyrsa-priv2pub to create the public key file later.')
-
- parser.add_option('-o', '--out', type='string',
- help='Output filename for the private key. The key is '
- 'written to stdout if this option is not present.')
-
- parser.add_option('--form',
- help='key format of the private and public keys - default PEM',
- choices=('PEM', 'DER'), default='PEM')
-
- (cli, cli_args) = parser.parse_args(sys.argv[1:])
-
- if len(cli_args) != 1:
- parser.print_help()
- raise SystemExit(1)
-
- try:
- keysize = int(cli_args[0])
- except ValueError:
- parser.print_help()
- print('Not a valid number: %s' % cli_args[0], file=sys.stderr)
- raise SystemExit(1)
-
- print('Generating %i-bit key' % keysize, file=sys.stderr)
- (pub_key, priv_key) = rsa.newkeys(keysize)
-
-
- # Save public key
- if cli.pubout:
- print('Writing public key to %s' % cli.pubout, file=sys.stderr)
- data = pub_key.save_pkcs1(format=cli.form)
- with open(cli.pubout, 'wb') as outfile:
- outfile.write(data)
-
- # Save private key
- data = priv_key.save_pkcs1(format=cli.form)
-
- if cli.out:
- print('Writing private key to %s' % cli.out, file=sys.stderr)
- with open(cli.out, 'wb') as outfile:
- outfile.write(data)
- else:
- print('Writing private key to stdout', file=sys.stderr)
- sys.stdout.write(data)
-
-
-class CryptoOperation(object):
- '''CLI callable that operates with input, output, and a key.'''
-
- __metaclass__ = abc.ABCMeta
-
- keyname = 'public' # or 'private'
- usage = 'usage: %%prog [options] %(keyname)s_key'
- description = None
- operation = 'decrypt'
- operation_past = 'decrypted'
- operation_progressive = 'decrypting'
- input_help = 'Name of the file to %(operation)s. Reads from stdin if ' \
- 'not specified.'
- output_help = 'Name of the file to write the %(operation_past)s file ' \
- 'to. Written to stdout if this option is not present.'
- expected_cli_args = 1
- has_output = True
-
- key_class = rsa.PublicKey
-
- def __init__(self):
- self.usage = self.usage % self.__class__.__dict__
- self.input_help = self.input_help % self.__class__.__dict__
- self.output_help = self.output_help % self.__class__.__dict__
-
- @abc.abstractmethod
- def perform_operation(self, indata, key, cli_args=None):
- '''Performs the program's operation.
-
- Implement in a subclass.
-
- :returns: the data to write to the output.
- '''
-
- def __call__(self):
- '''Runs the program.'''
-
- (cli, cli_args) = self.parse_cli()
-
- key = self.read_key(cli_args[0], cli.keyform)
-
- indata = self.read_infile(cli.input)
-
- print(self.operation_progressive.title(), file=sys.stderr)
- outdata = self.perform_operation(indata, key, cli_args)
-
- if self.has_output:
- self.write_outfile(outdata, cli.output)
-
- def parse_cli(self):
- '''Parse the CLI options
-
- :returns: (cli_opts, cli_args)
- '''
-
- parser = OptionParser(usage=self.usage, description=self.description)
-
- parser.add_option('-i', '--input', type='string', help=self.input_help)
-
- if self.has_output:
- parser.add_option('-o', '--output', type='string', help=self.output_help)
-
- parser.add_option('--keyform',
- help='Key format of the %s key - default PEM' % self.keyname,
- choices=('PEM', 'DER'), default='PEM')
-
- (cli, cli_args) = parser.parse_args(sys.argv[1:])
-
- if len(cli_args) != self.expected_cli_args:
- parser.print_help()
- raise SystemExit(1)
-
- return (cli, cli_args)
-
- def read_key(self, filename, keyform):
- '''Reads a public or private key.'''
-
- print('Reading %s key from %s' % (self.keyname, filename), file=sys.stderr)
- with open(filename, 'rb') as keyfile:
- keydata = keyfile.read()
-
- return self.key_class.load_pkcs1(keydata, keyform)
-
- def read_infile(self, inname):
- '''Read the input file'''
-
- if inname:
- print('Reading input from %s' % inname, file=sys.stderr)
- with open(inname, 'rb') as infile:
- return infile.read()
-
- print('Reading input from stdin', file=sys.stderr)
- return sys.stdin.read()
-
- def write_outfile(self, outdata, outname):
- '''Write the output file'''
-
- if outname:
- print('Writing output to %s' % outname, file=sys.stderr)
- with open(outname, 'wb') as outfile:
- outfile.write(outdata)
- else:
- print('Writing output to stdout', file=sys.stderr)
- sys.stdout.write(outdata)
-
-class EncryptOperation(CryptoOperation):
- '''Encrypts a file.'''
-
- keyname = 'public'
- description = ('Encrypts a file. The file must be shorter than the key '
- 'length in order to be encrypted. For larger files, use the '
- 'pyrsa-encrypt-bigfile command.')
- operation = 'encrypt'
- operation_past = 'encrypted'
- operation_progressive = 'encrypting'
-
-
- def perform_operation(self, indata, pub_key, cli_args=None):
- '''Encrypts files.'''
-
- return rsa.encrypt(indata, pub_key)
-
-class DecryptOperation(CryptoOperation):
- '''Decrypts a file.'''
-
- keyname = 'private'
- description = ('Decrypts a file. The original file must be shorter than '
- 'the key length in order to have been encrypted. For larger '
- 'files, use the pyrsa-decrypt-bigfile command.')
- operation = 'decrypt'
- operation_past = 'decrypted'
- operation_progressive = 'decrypting'
- key_class = rsa.PrivateKey
-
- def perform_operation(self, indata, priv_key, cli_args=None):
- '''Decrypts files.'''
-
- return rsa.decrypt(indata, priv_key)
-
-class SignOperation(CryptoOperation):
- '''Signs a file.'''
-
- keyname = 'private'
- usage = 'usage: %%prog [options] private_key hash_method'
- description = ('Signs a file, outputs the signature. Choose the hash '
- 'method from %s' % ', '.join(HASH_METHODS))
- operation = 'sign'
- operation_past = 'signature'
- operation_progressive = 'Signing'
- key_class = rsa.PrivateKey
- expected_cli_args = 2
-
- output_help = ('Name of the file to write the signature to. Written '
- 'to stdout if this option is not present.')
-
- def perform_operation(self, indata, priv_key, cli_args):
- '''Signs files.'''
-
- hash_method = cli_args[1]
- if hash_method not in HASH_METHODS:
- raise SystemExit('Invalid hash method, choose one of %s' %
- ', '.join(HASH_METHODS))
-
- return rsa.sign(indata, priv_key, hash_method)
-
-class VerifyOperation(CryptoOperation):
- '''Verify a signature.'''
-
- keyname = 'public'
- usage = 'usage: %%prog [options] public_key signature_file'
- description = ('Verifies a signature, exits with status 0 upon success, '
- 'prints an error message and exits with status 1 upon error.')
- operation = 'verify'
- operation_past = 'verified'
- operation_progressive = 'Verifying'
- key_class = rsa.PublicKey
- expected_cli_args = 2
- has_output = False
-
- def perform_operation(self, indata, pub_key, cli_args):
- '''Verifies files.'''
-
- signature_file = cli_args[1]
-
- with open(signature_file, 'rb') as sigfile:
- signature = sigfile.read()
-
- try:
- rsa.verify(indata, signature, pub_key)
- except rsa.VerificationError:
- raise SystemExit('Verification failed.')
-
- print('Verification OK', file=sys.stderr)
-
-
-class BigfileOperation(CryptoOperation):
- '''CryptoOperation that doesn't read the entire file into memory.'''
-
- def __init__(self):
- CryptoOperation.__init__(self)
-
- self.file_objects = []
-
- def __del__(self):
- '''Closes any open file handles.'''
-
- for fobj in self.file_objects:
- fobj.close()
-
- def __call__(self):
- '''Runs the program.'''
-
- (cli, cli_args) = self.parse_cli()
-
- key = self.read_key(cli_args[0], cli.keyform)
-
- # Get the file handles
- infile = self.get_infile(cli.input)
- outfile = self.get_outfile(cli.output)
-
- # Call the operation
- print(self.operation_progressive.title(), file=sys.stderr)
- self.perform_operation(infile, outfile, key, cli_args)
-
- def get_infile(self, inname):
- '''Returns the input file object'''
-
- if inname:
- print('Reading input from %s' % inname, file=sys.stderr)
- fobj = open(inname, 'rb')
- self.file_objects.append(fobj)
- else:
- print('Reading input from stdin', file=sys.stderr)
- fobj = sys.stdin
-
- return fobj
-
- def get_outfile(self, outname):
- '''Returns the output file object'''
-
- if outname:
- print('Will write output to %s' % outname, file=sys.stderr)
- fobj = open(outname, 'wb')
- self.file_objects.append(fobj)
- else:
- print('Will write output to stdout', file=sys.stderr)
- fobj = sys.stdout
-
- return fobj
-
-class EncryptBigfileOperation(BigfileOperation):
- '''Encrypts a file to VARBLOCK format.'''
-
- keyname = 'public'
- description = ('Encrypts a file to an encrypted VARBLOCK file. The file '
- 'can be larger than the key length, but the output file is only '
- 'compatible with Python-RSA.')
- operation = 'encrypt'
- operation_past = 'encrypted'
- operation_progressive = 'encrypting'
-
- def perform_operation(self, infile, outfile, pub_key, cli_args=None):
- '''Encrypts files to VARBLOCK.'''
-
- return rsa.bigfile.encrypt_bigfile(infile, outfile, pub_key)
-
-class DecryptBigfileOperation(BigfileOperation):
- '''Decrypts a file in VARBLOCK format.'''
-
- keyname = 'private'
- description = ('Decrypts an encrypted VARBLOCK file that was encrypted '
- 'with pyrsa-encrypt-bigfile')
- operation = 'decrypt'
- operation_past = 'decrypted'
- operation_progressive = 'decrypting'
- key_class = rsa.PrivateKey
-
- def perform_operation(self, infile, outfile, priv_key, cli_args=None):
- '''Decrypts a VARBLOCK file.'''
-
- return rsa.bigfile.decrypt_bigfile(infile, outfile, priv_key)
-
-
-encrypt = EncryptOperation()
-decrypt = DecryptOperation()
-sign = SignOperation()
-verify = VerifyOperation()
-encrypt_bigfile = EncryptBigfileOperation()
-decrypt_bigfile = DecryptBigfileOperation()
-
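
Editor's note: CryptoOperation above is a small template-method framework; subclasses only override a few class attributes and perform_operation, and the base class handles option parsing, key loading, and I/O. An illustrative subclass (hypothetical, not part of the library; imagine it appended to rsa/cli.py):

```python
import zlib

class ChecksumOperation(CryptoOperation):
    '''Hypothetical example: writes a CRC32 of the input, ignoring the key.'''

    keyname = 'public'
    description = 'Reads the input file and writes its CRC32 checksum.'
    operation = 'checksum'
    operation_past = 'checksummed'
    operation_progressive = 'checksumming'

    def perform_operation(self, indata, key, cli_args=None):
        # The base class has already loaded the key and read the input;
        # this toy operation does not actually use the key.
        return ('%08x\n' % (zlib.crc32(indata) & 0xffffffff)).encode('ascii')

checksum = ChecksumOperation()   # instantiated like the operations above
```
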
diff --git a/python-packages/rsa/common.py b/python-packages/rsa/common.py
deleted file mode 100644
index 39feb8c228..0000000000
--- a/python-packages/rsa/common.py
+++ /dev/null
@@ -1,185 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2011 Sybren A. StĂĽvel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''Common functionality shared by several modules.'''
-
-
-def bit_size(num):
- '''
- Number of bits needed to represent an integer, excluding any leading
- 0 bits.
-
- As per definition from http://wiki.python.org/moin/BitManipulation and
- to match the behavior of the Python 3 API.
-
- Usage::
-
- >>> bit_size(1023)
- 10
- >>> bit_size(1024)
- 11
- >>> bit_size(1025)
- 11
-
- :param num:
- Integer value. If num is 0, returns 0. Only the absolute value of the
- number is considered. Therefore, abs(num) is taken
- before the number's bit length is determined.
- :returns:
- Returns the number of bits in the integer.
- '''
- if num == 0:
- return 0
- if num < 0:
- num = -num
-
- # Make sure this is an int and not a float.
- num & 1
-
- hex_num = "%x" % num
- return ((len(hex_num) - 1) * 4) + {
- '0':0, '1':1, '2':2, '3':2,
- '4':3, '5':3, '6':3, '7':3,
- '8':4, '9':4, 'a':4, 'b':4,
- 'c':4, 'd':4, 'e':4, 'f':4,
- }[hex_num[0]]
-
-
-def _bit_size(number):
- '''
- Returns the number of bits required to hold a specific long number.
- '''
- if number < 0:
- raise ValueError('Only nonnegative numbers possible: %s' % number)
-
- if number == 0:
- return 0
-
- # This works, even with very large numbers. When using math.log(number, 2),
- # you'll get rounding errors and it'll fail.
- bits = 0
- while number:
- bits += 1
- number >>= 1
-
- return bits
-
-
-def byte_size(number):
- '''
- Returns the number of bytes required to hold a specific long number.
-
- The number of bytes is rounded up.
-
- Usage::
-
- >>> byte_size(1 << 1023)
- 128
- >>> byte_size((1 << 1024) - 1)
- 128
- >>> byte_size(1 << 1024)
- 129
-
- :param number:
- An unsigned integer
- :returns:
- The number of bytes required to hold a specific long number.
- '''
- quanta, mod = divmod(bit_size(number), 8)
- if mod or number == 0:
- quanta += 1
- return quanta
- #return int(math.ceil(bit_size(number) / 8.0))
-
-
-def extended_gcd(a, b):
- '''Returns a tuple (r, i, j) such that r = gcd(a, b) = ia + jb
- '''
- # r = gcd(a,b) i = multiplicitive inverse of a mod b
- # or j = multiplicitive inverse of b mod a
- # Neg return values for i or j are made positive mod b or a respectively
- # Iterateive Version is faster and uses much less stack space
- x = 0
- y = 1
- lx = 1
- ly = 0
- oa = a #Remember original a/b to remove
- ob = b #negative values from return results
- while b != 0:
- q = a // b
- (a, b) = (b, a % b)
- (x, lx) = ((lx - (q * x)),x)
- (y, ly) = ((ly - (q * y)),y)
- if (lx < 0): lx += ob #If neg wrap modulo orignal b
- if (ly < 0): ly += oa #If neg wrap modulo orignal a
- return (a, lx, ly) #Return only positive values
-
-
-def inverse(x, n):
- '''Returns x^-1 (mod n)
-
- >>> inverse(7, 4)
- 3
- >>> (inverse(143, 4) * 143) % 4
- 1
- '''
-
- (divider, inv, _) = extended_gcd(x, n)
-
- if divider != 1:
- raise ValueError("x (%d) and n (%d) are not relatively prime" % (x, n))
-
- return inv
-
-
-def crt(a_values, modulo_values):
- '''Chinese Remainder Theorem.
-
- Calculates x such that x = a[i] (mod m[i]) for each i.
-
- :param a_values: the a-values of the above equation
- :param modulo_values: the m-values of the above equation
- :returns: x such that x = a[i] (mod m[i]) for each i
-
-
- >>> crt([2, 3], [3, 5])
- 8
-
- >>> crt([2, 3, 2], [3, 5, 7])
- 23
-
- >>> crt([2, 3, 0], [7, 11, 15])
- 135
- '''
-
- m = 1
- x = 0
-
- for modulo in modulo_values:
- m *= modulo
-
- for (m_i, a_i) in zip(modulo_values, a_values):
- M_i = m // m_i
- inv = inverse(M_i, m_i)
-
- x = (x + a_i * M_i * inv) % m
-
- return x
-
-if __name__ == '__main__':
- import doctest
- doctest.testmod()
-
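
Editor's note: extended_gcd, inverse, and crt above are exactly the pieces needed for CRT-based RSA decryption, which is what the exp1/exp2/coef values on PrivateKey (in key.py, further down this diff) exist for. A worked sketch with the same toy key, assuming the module above is importable:

```python
from rsa.common import inverse, crt

p, q, e = 61, 53, 17
n, phi = p * q, (p - 1) * (q - 1)
d = inverse(e, phi)                    # 2753
c = pow(42, e, n)                      # encrypt m = 42

assert pow(c, d, n) == 42              # straight decryption

# CRT decryption: one half-size exponentiation per prime, then recombine.
m_p = pow(c, d % (p - 1), p)           # exp1 = d mod (p - 1)
m_q = pow(c, d % (q - 1), q)           # exp2 = d mod (q - 1)
assert crt([m_p, m_q], [p, q]) == 42
```
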
diff --git a/python-packages/rsa/core.py b/python-packages/rsa/core.py
deleted file mode 100644
index 90dfee8e57..0000000000
--- a/python-packages/rsa/core.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2011 Sybren A. StĂĽvel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''Core mathematical operations.
-
-This is the actual core RSA implementation, which is only defined
-mathematically on integers.
-'''
-
-
-from rsa._compat import is_integer
-
-def assert_int(var, name):
-
- if is_integer(var):
- return
-
- raise TypeError('%s should be an integer, not %s' % (name, var.__class__))
-
-def encrypt_int(message, ekey, n):
- '''Encrypts a message using encryption key 'ekey', working modulo n'''
-
- assert_int(message, 'message')
- assert_int(ekey, 'ekey')
- assert_int(n, 'n')
-
- if message < 0:
- raise ValueError('Only non-negative numbers are supported')
-
- if message > n:
- raise OverflowError("The message %i is too long for n=%i" % (message, n))
-
- return pow(message, ekey, n)
-
-def decrypt_int(cyphertext, dkey, n):
- '''Decrypts a cypher text using the decryption key 'dkey', working
- modulo n'''
-
- assert_int(cyphertext, 'cyphertext')
- assert_int(dkey, 'dkey')
- assert_int(n, 'n')
-
- message = pow(cyphertext, dkey, n)
- return message
-
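
Editor's note: the core primitives above are raw modular exponentiation and nothing else. A toy round trip (a sketch with the same textbook key; real use goes through rsa.pkcs1, which adds padding):

```python
from rsa.core import encrypt_int, decrypt_int

n, e, d = 3233, 17, 2753               # textbook toy key (p = 61, q = 53)
assert decrypt_int(encrypt_int(65, e, n), d, n) == 65
```
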
diff --git a/python-packages/rsa/key.py b/python-packages/rsa/key.py
deleted file mode 100644
index 3870541a8f..0000000000
--- a/python-packages/rsa/key.py
+++ /dev/null
@@ -1,581 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2011 Sybren A. StĂĽvel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''RSA key generation code.
-
-Create new keys with the newkeys() function. It will give you a PublicKey and a
-PrivateKey object.
-
-Loading and saving keys requires the pyasn1 module. This module is imported as
-late as possible, such that other functionality will remain working in absence
-of pyasn1.
-
-'''
-
-import logging
-from rsa._compat import b
-
-import rsa.prime
-import rsa.pem
-import rsa.common
-
-log = logging.getLogger(__name__)
-
-class AbstractKey(object):
- '''Abstract superclass for private and public keys.'''
-
- @classmethod
- def load_pkcs1(cls, keyfile, format='PEM'):
- r'''Loads a key in PKCS#1 DER or PEM format.
-
- :param keyfile: contents of a DER- or PEM-encoded file that contains
- the public key.
- :param format: the format of the file to load; 'PEM' or 'DER'
-
- :return: a PublicKey object
-
- '''
-
- methods = {
- 'PEM': cls._load_pkcs1_pem,
- 'DER': cls._load_pkcs1_der,
- }
-
- if format not in methods:
- formats = ', '.join(sorted(methods.keys()))
- raise ValueError('Unsupported format: %r, try one of %s' % (format,
- formats))
-
- method = methods[format]
- return method(keyfile)
-
- def save_pkcs1(self, format='PEM'):
- '''Saves the public key in PKCS#1 DER or PEM format.
-
- :param format: the format to save; 'PEM' or 'DER'
- :returns: the DER- or PEM-encoded public key.
-
- '''
-
- methods = {
- 'PEM': self._save_pkcs1_pem,
- 'DER': self._save_pkcs1_der,
- }
-
- if format not in methods:
- formats = ', '.join(sorted(methods.keys()))
- raise ValueError('Unsupported format: %r, try one of %s' % (format,
- formats))
-
- method = methods[format]
- return method()
-
-class PublicKey(AbstractKey):
- '''Represents a public RSA key.
-
- This key is also known as the 'encryption key'. It contains the 'n' and 'e'
- values.
-
- Supports attributes as well as dictionary-like access. Attribute access is
- faster, though.
-
- >>> PublicKey(5, 3)
- PublicKey(5, 3)
-
- >>> key = PublicKey(5, 3)
- >>> key.n
- 5
- >>> key['n']
- 5
- >>> key.e
- 3
- >>> key['e']
- 3
-
- '''
-
- __slots__ = ('n', 'e')
-
- def __init__(self, n, e):
- self.n = n
- self.e = e
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def __repr__(self):
- return 'PublicKey(%i, %i)' % (self.n, self.e)
-
- def __eq__(self, other):
- if other is None:
- return False
-
- if not isinstance(other, PublicKey):
- return False
-
- return self.n == other.n and self.e == other.e
-
- def __ne__(self, other):
- return not (self == other)
-
- @classmethod
- def _load_pkcs1_der(cls, keyfile):
- r'''Loads a key in PKCS#1 DER format.
-
- @param keyfile: contents of a DER-encoded file that contains the public
- key.
- @return: a PublicKey object
-
- First let's construct a DER encoded key:
-
- >>> import base64
- >>> b64der = 'MAwCBQCNGmYtAgMBAAE='
- >>> der = base64.decodestring(b64der)
-
- This loads the file:
-
- >>> PublicKey._load_pkcs1_der(der)
- PublicKey(2367317549, 65537)
-
- '''
-
- from pyasn1.codec.der import decoder
- (pub, _) = decoder.decode(keyfile)
-
- # ASN.1 contents of DER encoded public key:
- #
- # RSAPublicKey ::= SEQUENCE {
- # modulus INTEGER, -- n
- # publicExponent INTEGER, -- e
- # }
-
- as_ints = tuple(int(x) for x in pub)
- return cls(*as_ints)
-
- def _save_pkcs1_der(self):
- '''Saves the public key in PKCS#1 DER format.
-
- @returns: the DER-encoded public key.
- '''
-
- from pyasn1.type import univ, namedtype
- from pyasn1.codec.der import encoder
-
- class AsnPubKey(univ.Sequence):
- componentType = namedtype.NamedTypes(
- namedtype.NamedType('modulus', univ.Integer()),
- namedtype.NamedType('publicExponent', univ.Integer()),
- )
-
- # Create the ASN object
- asn_key = AsnPubKey()
- asn_key.setComponentByName('modulus', self.n)
- asn_key.setComponentByName('publicExponent', self.e)
-
- return encoder.encode(asn_key)
-
- @classmethod
- def _load_pkcs1_pem(cls, keyfile):
- '''Loads a PKCS#1 PEM-encoded public key file.
-
- The contents of the file before the "-----BEGIN RSA PUBLIC KEY-----" and
- after the "-----END RSA PUBLIC KEY-----" lines is ignored.
-
- @param keyfile: contents of a PEM-encoded file that contains the public
- key.
- @return: a PublicKey object
- '''
-
- der = rsa.pem.load_pem(keyfile, 'RSA PUBLIC KEY')
- return cls._load_pkcs1_der(der)
-
- def _save_pkcs1_pem(self):
- '''Saves a PKCS#1 PEM-encoded public key file.
-
- @return: contents of a PEM-encoded file that contains the public key.
- '''
-
- der = self._save_pkcs1_der()
- return rsa.pem.save_pem(der, 'RSA PUBLIC KEY')
-
-class PrivateKey(AbstractKey):
- '''Represents a private RSA key.
-
- This key is also known as the 'decryption key'. It contains the 'n', 'e',
- 'd', 'p', 'q' and other values.
-
- Supports attributes as well as dictionary-like access. Attribute access is
- faster, though.
-
- >>> PrivateKey(3247, 65537, 833, 191, 17)
- PrivateKey(3247, 65537, 833, 191, 17)
-
- exp1, exp2 and coef don't have to be given; if omitted, they will be calculated:
-
- >>> pk = PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
- >>> pk.exp1
- 55063
- >>> pk.exp2
- 10095
- >>> pk.coef
- 50797
-
- If you give exp1, exp2 or coef, they will be used as-is:
-
- >>> pk = PrivateKey(1, 2, 3, 4, 5, 6, 7, 8)
- >>> pk.exp1
- 6
- >>> pk.exp2
- 7
- >>> pk.coef
- 8
-
- '''
-
- __slots__ = ('n', 'e', 'd', 'p', 'q', 'exp1', 'exp2', 'coef')
-
- def __init__(self, n, e, d, p, q, exp1=None, exp2=None, coef=None):
- self.n = n
- self.e = e
- self.d = d
- self.p = p
- self.q = q
-
- # Calculate the other values if they aren't supplied
- if exp1 is None:
- self.exp1 = int(d % (p - 1))
- else:
- self.exp1 = exp1
-
- if exp2 is None:
- self.exp2 = int(d % (q - 1))
- else:
- self.exp2 = exp2
-
- if coef is None:
- self.coef = rsa.common.inverse(q, p)
- else:
- self.coef = coef
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def __repr__(self):
- return 'PrivateKey(%(n)i, %(e)i, %(d)i, %(p)i, %(q)i)' % self
-
- def __eq__(self, other):
- if other is None:
- return False
-
- if not isinstance(other, PrivateKey):
- return False
-
- return (self.n == other.n and
- self.e == other.e and
- self.d == other.d and
- self.p == other.p and
- self.q == other.q and
- self.exp1 == other.exp1 and
- self.exp2 == other.exp2 and
- self.coef == other.coef)
-
- def __ne__(self, other):
- return not (self == other)
-
- @classmethod
- def _load_pkcs1_der(cls, keyfile):
- r'''Loads a key in PKCS#1 DER format.
-
- @param keyfile: contents of a DER-encoded file that contains the private
- key.
- @return: a PrivateKey object
-
- First let's construct a DER encoded key:
-
- >>> import base64
- >>> b64der = 'MC4CAQACBQDeKYlRAgMBAAECBQDHn4npAgMA/icCAwDfxwIDANcXAgInbwIDAMZt'
- >>> der = base64.decodestring(b64der)
-
- This loads the file:
-
- >>> PrivateKey._load_pkcs1_der(der)
- PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
-
- '''
-
- from pyasn1.codec.der import decoder
- (priv, _) = decoder.decode(keyfile)
-
- # ASN.1 contents of DER encoded private key:
- #
- # RSAPrivateKey ::= SEQUENCE {
- # version Version,
- # modulus INTEGER, -- n
- # publicExponent INTEGER, -- e
- # privateExponent INTEGER, -- d
- # prime1 INTEGER, -- p
- # prime2 INTEGER, -- q
- # exponent1 INTEGER, -- d mod (p-1)
- # exponent2 INTEGER, -- d mod (q-1)
- # coefficient INTEGER, -- (inverse of q) mod p
- # otherPrimeInfos OtherPrimeInfos OPTIONAL
- # }
-
- if priv[0] != 0:
- raise ValueError('Unable to read this file, version %s != 0' % priv[0])
-
- as_ints = tuple(int(x) for x in priv[1:9])
- return cls(*as_ints)
-
- def _save_pkcs1_der(self):
- '''Saves the private key in PKCS#1 DER format.
-
- @returns: the DER-encoded private key.
- '''
-
- from pyasn1.type import univ, namedtype
- from pyasn1.codec.der import encoder
-
- class AsnPrivKey(univ.Sequence):
- componentType = namedtype.NamedTypes(
- namedtype.NamedType('version', univ.Integer()),
- namedtype.NamedType('modulus', univ.Integer()),
- namedtype.NamedType('publicExponent', univ.Integer()),
- namedtype.NamedType('privateExponent', univ.Integer()),
- namedtype.NamedType('prime1', univ.Integer()),
- namedtype.NamedType('prime2', univ.Integer()),
- namedtype.NamedType('exponent1', univ.Integer()),
- namedtype.NamedType('exponent2', univ.Integer()),
- namedtype.NamedType('coefficient', univ.Integer()),
- )
-
- # Create the ASN object
- asn_key = AsnPrivKey()
- asn_key.setComponentByName('version', 0)
- asn_key.setComponentByName('modulus', self.n)
- asn_key.setComponentByName('publicExponent', self.e)
- asn_key.setComponentByName('privateExponent', self.d)
- asn_key.setComponentByName('prime1', self.p)
- asn_key.setComponentByName('prime2', self.q)
- asn_key.setComponentByName('exponent1', self.exp1)
- asn_key.setComponentByName('exponent2', self.exp2)
- asn_key.setComponentByName('coefficient', self.coef)
-
- return encoder.encode(asn_key)
-
- @classmethod
- def _load_pkcs1_pem(cls, keyfile):
- '''Loads a PKCS#1 PEM-encoded private key file.
-
- The contents of the file before the "-----BEGIN RSA PRIVATE KEY-----" and
- after the "-----END RSA PRIVATE KEY-----" lines is ignored.
-
- @param keyfile: contents of a PEM-encoded file that contains the private
- key.
- @return: a PrivateKey object
- '''
-
- der = rsa.pem.load_pem(keyfile, b('RSA PRIVATE KEY'))
- return cls._load_pkcs1_der(der)
-
- def _save_pkcs1_pem(self):
- '''Saves a PKCS#1 PEM-encoded private key file.
-
- @return: contents of a PEM-encoded file that contains the private key.
- '''
-
- der = self._save_pkcs1_der()
- return rsa.pem.save_pem(der, b('RSA PRIVATE KEY'))
-
-def find_p_q(nbits, getprime_func=rsa.prime.getprime, accurate=True):
- '''Returns a tuple of two different primes of nbits bits each.
-
- The resulting p * q has exactly 2 * nbits bits, and the returned p and q
- will not be equal.
-
- :param nbits: the number of bits in each of p and q.
- :param getprime_func: the getprime function, defaults to
- :py:func:`rsa.prime.getprime`.
-
- *Introduced in Python-RSA 3.1*
-
- :param accurate: whether to enable accurate mode or not.
- :returns: (p, q), where p > q
-
- >>> (p, q) = find_p_q(128)
- >>> from rsa import common
- >>> common.bit_size(p * q)
- 256
-
- When not in accurate mode, the number of bits can be slightly less
-
- >>> (p, q) = find_p_q(128, accurate=False)
- >>> from rsa import common
- >>> common.bit_size(p * q) <= 256
- True
- >>> common.bit_size(p * q) > 240
- True
-
- '''
-
- total_bits = nbits * 2
-
- # Make sure that p and q aren't too close together, or factoring
- # programs can factor n.
- shift = nbits // 16
- pbits = nbits + shift
- qbits = nbits - shift
-
- # Choose the two initial primes
- log.debug('find_p_q(%i): Finding p', nbits)
- p = getprime_func(pbits)
- log.debug('find_p_q(%i): Finding q', nbits)
- q = getprime_func(qbits)
-
- def is_acceptable(p, q):
- '''Returns True iff p and q are acceptable:
-
- - p and q differ
- - (p * q) has the right number of bits (when accurate=True)
- '''
-
- if p == q:
- return False
-
- if not accurate:
- return True
-
- # Make sure we have just the right amount of bits
- found_size = rsa.common.bit_size(p * q)
- return total_bits == found_size
-
- # Keep choosing other primes until they match our requirements.
- change_p = False
- while not is_acceptable(p, q):
- # Change p on one iteration and q on the other
- if change_p:
- p = getprime_func(pbits)
- else:
- q = getprime_func(qbits)
-
- change_p = not change_p
-
- # We want p > q as described on
- # http://www.di-mgt.com.au/rsa_alg.html#crt
- return (max(p, q), min(p, q))
-
-def calculate_keys(p, q, nbits):
- '''Calculates an encryption and a decryption key given p and q, and
- returns them as a tuple (e, d)
-
- '''
-
- phi_n = (p - 1) * (q - 1)
-
- # A very common choice for e is 65537
- e = 65537
-
- try:
- d = rsa.common.inverse(e, phi_n)
- except ValueError:
- raise ValueError("e (%d) and phi_n (%d) are not relatively prime" %
- (e, phi_n))
-
- if (e * d) % phi_n != 1:
- raise ValueError("e (%d) and d (%d) are not mult. inv. modulo "
- "phi_n (%d)" % (e, d, phi_n))
-
- return (e, d)
-
-def gen_keys(nbits, getprime_func, accurate=True):
- '''Generate RSA keys of nbits bits. Returns (p, q, e, d).
-
- Note: this can take a long time, depending on the key size.
-
- :param nbits: the total number of bits in ``p`` and ``q``. Both ``p`` and
- ``q`` will use ``nbits/2`` bits.
- :param getprime_func: either :py:func:`rsa.prime.getprime` or a function
- with similar signature.
- '''
-
- (p, q) = find_p_q(nbits // 2, getprime_func, accurate)
- (e, d) = calculate_keys(p, q, nbits // 2)
-
- return (p, q, e, d)
-
-def newkeys(nbits, accurate=True, poolsize=1):
- '''Generates public and private keys, and returns them as (pub, priv).
-
- The public key is also known as the 'encryption key', and is a
- :py:class:`rsa.PublicKey` object. The private key is also known as the
- 'decryption key' and is a :py:class:`rsa.PrivateKey` object.
-
- :param nbits: the number of bits required to store ``n = p*q``.
- :param accurate: when True, ``n`` will have exactly the number of bits you
- asked for. However, this makes key generation much slower. When False,
- ``n`` may have slightly fewer bits.
- :param poolsize: the number of processes to use to generate the prime
- numbers. If set to a number > 1, a parallel algorithm will be used.
- This requires Python 2.6 or newer.
-
- :returns: a tuple (:py:class:`rsa.PublicKey`, :py:class:`rsa.PrivateKey`)
-
- The ``poolsize`` parameter was added in *Python-RSA 3.1* and requires
- Python 2.6 or newer.
-
- '''
-
- if nbits < 16:
- raise ValueError('Key too small')
-
- if poolsize < 1:
- raise ValueError('Pool size (%i) should be >= 1' % poolsize)
-
- # Determine which getprime function to use
- if poolsize > 1:
- from rsa import parallel
- import functools
-
- getprime_func = functools.partial(parallel.getprime, poolsize=poolsize)
- else: getprime_func = rsa.prime.getprime
-
- # Generate the key components
- (p, q, e, d) = gen_keys(nbits, getprime_func)
-
- # Create the key objects
- n = p * q
-
- return (
- PublicKey(n, e),
- PrivateKey(n, e, d, p, q)
- )
-
-__all__ = ['PublicKey', 'PrivateKey', 'newkeys']
-
-if __name__ == '__main__':
- import doctest
-
- try:
- for count in range(100):
- (failures, tests) = doctest.testmod()
- if failures:
- break
-
- if (count and count % 10 == 0) or count == 1:
- print('%i times' % count)
- except KeyboardInterrupt:
- print('Aborted')
- else:
- print('Doctests done')
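A short usage sketch of the API above, including a check of the CRT values that PrivateKey derives when exp1/exp2/coef are omitted::

    import rsa.key

    (pub, priv) = rsa.key.newkeys(256)

    # Both halves of the pair share the modulus and public exponent.
    assert pub.n == priv.n and pub.e == priv.e

    # The derived CRT components satisfy their defining identities.
    assert priv.exp1 == priv.d % (priv.p - 1)
    assert priv.exp2 == priv.d % (priv.q - 1)
    assert (priv.coef * priv.q) % priv.p == 1
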
diff --git a/python-packages/rsa/parallel.py b/python-packages/rsa/parallel.py
deleted file mode 100644
index e5034ac707..0000000000
--- a/python-packages/rsa/parallel.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2011 Sybren A. StĂĽvel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''Functions for parallel computation on multiple cores.
-
-Introduced in Python-RSA 3.1.
-
-.. note::
-
- Requires Python 2.6 or newer.
-
-'''
-
-from __future__ import print_function
-
-import multiprocessing as mp
-
-import rsa.prime
-import rsa.randnum
-
-def _find_prime(nbits, pipe):
- while True:
- integer = rsa.randnum.read_random_int(nbits)
-
- # Make sure it's odd
- integer |= 1
-
- # Test for primeness
- if rsa.prime.is_prime(integer):
- pipe.send(integer)
- return
-
-def getprime(nbits, poolsize):
- '''Returns a prime number that can be stored in 'nbits' bits.
-
- Works in multiple processes at the same time.
-
- >>> p = getprime(128, 3)
- >>> rsa.prime.is_prime(p-1)
- False
- >>> rsa.prime.is_prime(p)
- True
- >>> rsa.prime.is_prime(p+1)
- False
-
- >>> from rsa import common
- >>> common.bit_size(p) == 128
- True
-
- '''
-
- (pipe_recv, pipe_send) = mp.Pipe(duplex=False)
-
- # Create processes
- procs = [mp.Process(target=_find_prime, args=(nbits, pipe_send))
- for _ in range(poolsize)]
- for proc in procs:
- proc.start()
-
- result = pipe_recv.recv()
-
- for proc in procs:
- proc.terminate()
-
- return result
-
-__all__ = ['getprime']
-
-
-if __name__ == '__main__':
- print('Running doctests 100x or until failure')
- import doctest
-
- for count in range(100):
- (failures, tests) = doctest.testmod()
- if failures:
- break
-
- if count and count % 10 == 0:
- print('%i times' % count)
-
- print('Doctests done')
-
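Each worker process draws random odd candidates and primality-tests them independently; the first prime sent through the pipe wins and the remaining workers are terminated. A usage sketch (multiprocessing needs the __main__ guard on platforms that spawn rather than fork)::

    import rsa.parallel

    if __name__ == '__main__':
        p = rsa.parallel.getprime(128, 4)  # 128-bit prime, 4 worker processes
        print('found prime: %d' % p)
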
diff --git a/python-packages/rsa/pem.py b/python-packages/rsa/pem.py
deleted file mode 100644
index b1c3a0edb4..0000000000
--- a/python-packages/rsa/pem.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2011 Sybren A. StĂĽvel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''Functions that load and write PEM-encoded files.'''
-
-import base64
-from rsa._compat import b, is_bytes
-
-def _markers(pem_marker):
- '''
- Returns the start and end PEM markers
- '''
-
- if is_bytes(pem_marker):
- pem_marker = pem_marker.decode('utf-8')
-
- return (b('-----BEGIN %s-----' % pem_marker),
- b('-----END %s-----' % pem_marker))
-
-def load_pem(contents, pem_marker):
- '''Loads a PEM file.
-
- @param contents: the contents of the file to interpret
- @param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
- when your file has '-----BEGIN RSA PRIVATE KEY-----' and
- '-----END RSA PRIVATE KEY-----' markers.
-
- @return: the base64-decoded content between the start and end markers.
-
- @raise ValueError: when the content is invalid, for example when the start
- marker cannot be found.
-
- '''
-
- (pem_start, pem_end) = _markers(pem_marker)
-
- pem_lines = []
- in_pem_part = False
-
- for line in contents.splitlines():
- line = line.strip()
-
- # Skip empty lines
- if not line:
- continue
-
- # Handle start marker
- if line == pem_start:
- if in_pem_part:
- raise ValueError('Seen start marker "%s" twice' % pem_start)
-
- in_pem_part = True
- continue
-
- # Skip stuff before first marker
- if not in_pem_part:
- continue
-
- # Handle end marker
- if in_pem_part and line == pem_end:
- in_pem_part = False
- break
-
- # Load fields
- if b(':') in line:
- continue
-
- pem_lines.append(line)
-
- # Do some sanity checks
- if not pem_lines:
- raise ValueError('No PEM start marker "%s" found' % pem_start)
-
- if in_pem_part:
- raise ValueError('No PEM end marker "%s" found' % pem_end)
-
- # Base64-decode the contents
- pem = b('').join(pem_lines)
- return base64.decodestring(pem)
-
-
-def save_pem(contents, pem_marker):
- '''Saves a PEM file.
-
- @param contents: the contents to encode in PEM format
- @param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
- when your file has '-----BEGIN RSA PRIVATE KEY-----' and
- '-----END RSA PRIVATE KEY-----' markers.
-
- @return: the PEM-encoded content, including the start and end markers.
-
- '''
-
- (pem_start, pem_end) = _markers(pem_marker)
-
- b64 = base64.encodestring(contents).replace(b('\n'), b(''))
- pem_lines = [pem_start]
-
- for block_start in range(0, len(b64), 64):
- block = b64[block_start:block_start + 64]
- pem_lines.append(block)
-
- pem_lines.append(pem_end)
- pem_lines.append(b(''))
-
- return b('\n').join(pem_lines)
-
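A round trip through the two functions above. 'DEMO DATA' is an arbitrary marker invented for this example; any marker works as long as load and save agree on it::

    from rsa.pem import save_pem, load_pem
    from rsa._compat import b

    payload = b('\x00\x01\x02\x03') * 40
    pem = save_pem(payload, 'DEMO DATA')
    assert b('-----BEGIN DEMO DATA-----') in pem
    assert load_pem(pem, 'DEMO DATA') == payload
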
diff --git a/python-packages/rsa/pkcs1.py b/python-packages/rsa/pkcs1.py
deleted file mode 100644
index 1274fe390d..0000000000
--- a/python-packages/rsa/pkcs1.py
+++ /dev/null
@@ -1,389 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2011 Sybren A. StĂĽvel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''Functions for PKCS#1 version 1.5 encryption and signing
-
-This module implements certain functionality from PKCS#1 version 1.5. For a
-very clear example, read http://www.di-mgt.com.au/rsa_alg.html#pkcs1schemes
-
-At least 8 bytes of random padding are used when encrypting a message. This makes
-these methods much more secure than the ones in the ``rsa`` module.
-
-WARNING: this module leaks information when decryption or verification fails.
-The exceptions that are raised contain the Python traceback information, which
-can be used to deduce where in the process the failure occurred. DO NOT PASS
-SUCH INFORMATION to your users.
-'''
-
-import hashlib
-import os
-
-from rsa._compat import b
-from rsa import common, transform, core, varblock
-
-# ASN.1 codes that describe the hash algorithm used.
-HASH_ASN1 = {
- 'MD5': b('\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x05\x05\x00\x04\x10'),
- 'SHA-1': b('\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'),
- 'SHA-256': b('\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20'),
- 'SHA-384': b('\x30\x41\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02\x05\x00\x04\x30'),
- 'SHA-512': b('\x30\x51\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03\x05\x00\x04\x40'),
-}
-
-HASH_METHODS = {
- 'MD5': hashlib.md5,
- 'SHA-1': hashlib.sha1,
- 'SHA-256': hashlib.sha256,
- 'SHA-384': hashlib.sha384,
- 'SHA-512': hashlib.sha512,
-}
-
-class CryptoError(Exception):
- '''Base class for all exceptions in this module.'''
-
-class DecryptionError(CryptoError):
- '''Raised when decryption fails.'''
-
-class VerificationError(CryptoError):
- '''Raised when verification fails.'''
-
-def _pad_for_encryption(message, target_length):
- r'''Pads the message for encryption, returning the padded message.
-
- :return: 00 02 RANDOM_DATA 00 MESSAGE
-
- >>> block = _pad_for_encryption('hello', 16)
- >>> len(block)
- 16
- >>> block[0:2]
- '\x00\x02'
- >>> block[-6:]
- '\x00hello'
-
- '''
-
- max_msglength = target_length - 11
- msglength = len(message)
-
- if msglength > max_msglength:
- raise OverflowError('%i bytes needed for message, but there is only'
- ' space for %i' % (msglength, max_msglength))
-
- # Get random padding
- padding = b('')
- padding_length = target_length - msglength - 3
-
- # Because we remove 0-bytes, we can end up with less padding than we
- # asked for, so keep adding random data until we reach the correct length.
- while len(padding) < padding_length:
- needed_bytes = padding_length - len(padding)
-
- # Always read a few bytes more than we need, and trim off the rest
- # after removing the 0-bytes. This increases the chance of getting
- # enough bytes, especially when needed_bytes is small
- new_padding = os.urandom(needed_bytes + 5)
- new_padding = new_padding.replace(b('\x00'), b(''))
- padding = padding + new_padding[:needed_bytes]
-
- assert len(padding) == padding_length
-
- return b('').join([b('\x00\x02'),
- padding,
- b('\x00'),
- message])
-
-
-def _pad_for_signing(message, target_length):
- r'''Pads the message for signing, returning the padded message.
-
- The padding is always a repetition of FF bytes.
-
- :return: 00 01 PADDING 00 MESSAGE
-
- >>> block = _pad_for_signing('hello', 16)
- >>> len(block)
- 16
- >>> block[0:2]
- '\x00\x01'
- >>> block[-6:]
- '\x00hello'
- >>> block[2:-6]
- '\xff\xff\xff\xff\xff\xff\xff\xff'
-
- '''
-
- max_msglength = target_length - 11
- msglength = len(message)
-
- if msglength > max_msglength:
- raise OverflowError('%i bytes needed for message, but there is only'
- ' space for %i' % (msglength, max_msglength))
-
- padding_length = target_length - msglength - 3
-
- return b('').join([b('\x00\x01'),
- padding_length * b('\xff'),
- b('\x00'),
- message])
-
-
-def encrypt(message, pub_key):
- '''Encrypts the given message using PKCS#1 v1.5
-
- :param message: the message to encrypt. Must be a byte string no longer than
- ``k-11`` bytes, where ``k`` is the number of bytes needed to encode
- the ``n`` component of the public key.
- :param pub_key: the :py:class:`rsa.PublicKey` to encrypt with.
- :raise OverflowError: when the message is too large to fit in the padded
- block.
-
- >>> from rsa import key, common
- >>> (pub_key, priv_key) = key.newkeys(256)
- >>> message = 'hello'
- >>> crypto = encrypt(message, pub_key)
-
- The crypto text should be just as long as the public key 'n' component:
-
- >>> len(crypto) == common.byte_size(pub_key.n)
- True
-
- '''
-
- keylength = common.byte_size(pub_key.n)
- padded = _pad_for_encryption(message, keylength)
-
- payload = transform.bytes2int(padded)
- encrypted = core.encrypt_int(payload, pub_key.e, pub_key.n)
- block = transform.int2bytes(encrypted, keylength)
-
- return block
-
-def decrypt(crypto, priv_key):
- r'''Decrypts the given message using PKCS#1 v1.5
-
- The decryption is considered 'failed' when the resulting cleartext doesn't
- start with the bytes 00 02, or when the 00 byte between the padding and
- the message cannot be found.
-
- :param crypto: the crypto text as returned by :py:func:`rsa.encrypt`
- :param priv_key: the :py:class:`rsa.PrivateKey` to decrypt with.
- :raise DecryptionError: when the decryption fails. No details are given as
- to why the code thinks the decryption fails, as this would leak
- information about the private key.
-
-
- >>> import rsa
- >>> (pub_key, priv_key) = rsa.newkeys(256)
-
- It works with strings:
-
- >>> crypto = encrypt('hello', pub_key)
- >>> decrypt(crypto, priv_key)
- 'hello'
-
- And with binary data:
-
- >>> crypto = encrypt('\x00\x00\x00\x00\x01', pub_key)
- >>> decrypt(crypto, priv_key)
- '\x00\x00\x00\x00\x01'
-
- Altering the encrypted information will *likely* cause a
- :py:class:`rsa.pkcs1.DecryptionError`. If you want to be *sure*, use
- :py:func:`rsa.sign`.
-
-
- .. warning::
-
- Never display the stack trace of a
- :py:class:`rsa.pkcs1.DecryptionError` exception. It shows where in the
- code the exception occurred, and thus leaks information about the key.
- It's only a tiny bit of information, but every bit makes cracking the
- keys easier.
-
- >>> crypto = encrypt('hello', pub_key)
- >>> crypto = crypto[0:5] + 'X' + crypto[6:] # change a byte
- >>> decrypt(crypto, priv_key)
- Traceback (most recent call last):
- ...
- DecryptionError: Decryption failed
-
- '''
-
- blocksize = common.byte_size(priv_key.n)
- encrypted = transform.bytes2int(crypto)
- decrypted = core.decrypt_int(encrypted, priv_key.d, priv_key.n)
- cleartext = transform.int2bytes(decrypted, blocksize)
-
- # If we can't find the cleartext marker, decryption failed.
- if cleartext[0:2] != b('\x00\x02'):
- raise DecryptionError('Decryption failed')
-
- # Find the 00 separator between the padding and the message
- try:
- sep_idx = cleartext.index(b('\x00'), 2)
- except ValueError:
- raise DecryptionError('Decryption failed')
-
- return cleartext[sep_idx+1:]
-
-def sign(message, priv_key, hash):
- '''Signs the message with the private key.
-
- Hashes the message, then signs the hash with the given key. This is known
- as a "detached signature", because the message itself isn't altered.
-
- :param message: the message to sign. Can be an 8-bit string or a file-like
- object. If ``message`` has a ``read()`` method, it is assumed to be a
- file-like object.
- :param priv_key: the :py:class:`rsa.PrivateKey` to sign with
- :param hash: the hash method used on the message. Use 'MD5', 'SHA-1',
- 'SHA-256', 'SHA-384' or 'SHA-512'.
- :return: a message signature block.
- :raise OverflowError: if the private key is too small to contain the
- requested hash.
-
- '''
-
- # Get the ASN1 code for this hash method
- if hash not in HASH_ASN1:
- raise ValueError('Invalid hash method: %s' % hash)
- asn1code = HASH_ASN1[hash]
-
- # Calculate the hash
- hash = _hash(message, hash)
-
- # Encrypt the hash with the private key
- cleartext = asn1code + hash
- keylength = common.byte_size(priv_key.n)
- padded = _pad_for_signing(cleartext, keylength)
-
- payload = transform.bytes2int(padded)
- encrypted = core.encrypt_int(payload, priv_key.d, priv_key.n)
- block = transform.int2bytes(encrypted, keylength)
-
- return block
-
-def verify(message, signature, pub_key):
- '''Verifies that the signature matches the message.
-
- The hash method is detected automatically from the signature.
-
- :param message: the signed message. Can be an 8-bit string or a file-like
- object. If ``message`` has a ``read()`` method, it is assumed to be a
- file-like object.
- :param signature: the signature block, as created with :py:func:`rsa.sign`.
- :param pub_key: the :py:class:`rsa.PublicKey` of the person signing the message.
- :raise VerificationError: when the signature doesn't match the message.
-
- .. warning::
-
- Never display the stack trace of a
- :py:class:`rsa.pkcs1.VerificationError` exception. It shows where in
- the code the exception occurred, and thus leaks information about the
- key. It's only a tiny bit of information, but every bit makes cracking
- the keys easier.
-
- '''
-
- blocksize = common.byte_size(pub_key.n)
- encrypted = transform.bytes2int(signature)
- decrypted = core.decrypt_int(encrypted, pub_key.e, pub_key.n)
- clearsig = transform.int2bytes(decrypted, blocksize)
-
- # If we can't find the signature marker, verification failed.
- if clearsig[0:2] != b('\x00\x01'):
- raise VerificationError('Verification failed')
-
- # Find the 00 separator between the padding and the payload
- try:
- sep_idx = clearsig.index(b('\x00'), 2)
- except ValueError:
- raise VerificationError('Verification failed')
-
- # Get the hash and the hash method
- (method_name, signature_hash) = _find_method_hash(clearsig[sep_idx+1:])
- message_hash = _hash(message, method_name)
-
- # Compare the real hash to the hash in the signature
- if message_hash != signature_hash:
- raise VerificationError('Verification failed')
-
-def _hash(message, method_name):
- '''Returns the message digest.
-
- :param message: the signed message. Can be an 8-bit string or a file-like
- object. If ``message`` has a ``read()`` method, it is assumed to be a
- file-like object.
- :param method_name: the hash method, must be a key of
- :py:const:`HASH_METHODS`.
-
- '''
-
- if method_name not in HASH_METHODS:
- raise ValueError('Invalid hash method: %s' % method_name)
-
- method = HASH_METHODS[method_name]
- hasher = method()
-
- if hasattr(message, 'read') and hasattr(message.read, '__call__'):
- # read as 1K blocks
- for block in varblock.yield_fixedblocks(message, 1024):
- hasher.update(block)
- else:
- # hash the message object itself.
- hasher.update(message)
-
- return hasher.digest()
-
-
-def _find_method_hash(method_hash):
- '''Finds the hash method and the hash itself.
-
- :param method_hash: ASN1 code for the hash method concatenated with the
- hash itself.
-
- :return: tuple (method, hash) where ``method`` is the name of the hash
- method used, and ``hash`` is the hash itself.
-
- :raise VerificationError: when the hash method cannot be found
-
- '''
-
- for (hashname, asn1code) in HASH_ASN1.items():
- if not method_hash.startswith(asn1code):
- continue
-
- return (hashname, method_hash[len(asn1code):])
-
- raise VerificationError('Verification failed')
-
-
-__all__ = ['encrypt', 'decrypt', 'sign', 'verify',
- 'DecryptionError', 'VerificationError', 'CryptoError']
-
-if __name__ == '__main__':
- print('Running doctests 1000x or until failure')
- import doctest
-
- for count in range(1000):
- (failures, tests) = doctest.testmod()
- if failures:
- break
-
- if count and count % 100 == 0:
- print('%i times' % count)
-
- print('Doctests done')
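Putting the four public functions together; a sketch assuming the usual top-level re-exports (the package's __init__, not shown in this diff, exposes newkeys, encrypt, decrypt, sign and verify)::

    import rsa

    (pub_key, priv_key) = rsa.newkeys(512)

    # Encrypt with the public key, decrypt with the private key.
    crypto = rsa.encrypt('secret', pub_key)
    assert rsa.decrypt(crypto, priv_key) == 'secret'

    # Sign with the private key, verify with the public key; the hash
    # method is recovered from the ASN.1 prefix inside the signature.
    signature = rsa.sign('important', priv_key, 'SHA-256')
    rsa.verify('important', signature, pub_key)  # raises VerificationError on tampering
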
diff --git a/python-packages/rsa/prime.py b/python-packages/rsa/prime.py
deleted file mode 100644
index 7422eb1d28..0000000000
--- a/python-packages/rsa/prime.py
+++ /dev/null
@@ -1,166 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2011 Sybren A. StĂĽvel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''Numerical functions related to primes.
-
-Implementation based on the book Algorithm Design by Michael T. Goodrich and
-Roberto Tamassia, 2002.
-'''
-
-__all__ = [ 'getprime', 'are_relatively_prime']
-
-import rsa.randnum
-
-def gcd(p, q):
- '''Returns the greatest common divisor of p and q
-
- >>> gcd(48, 180)
- 12
- '''
-
- while q != 0:
- if p < q: (p,q) = (q,p)
- (p,q) = (q, p % q)
- return p
-
-
-def jacobi(a, b):
- '''Calculates the value of the Jacobi symbol (a/b) where both a and b are
- positive integers, and b is odd
-
- :returns: -1, 0 or 1
- '''
-
- assert a > 0
- assert b > 0
-
- if a == 0: return 0
- result = 1
- while a > 1:
- if a & 1:
- if ((a-1)*(b-1) >> 2) & 1:
- result = -result
- a, b = b % a, a
- else:
- if (((b * b) - 1) >> 3) & 1:
- result = -result
- a >>= 1
- if a == 0: return 0
- return result
-
-def jacobi_witness(x, n):
- '''Returns False if n is an Euler pseudo-prime with base x, and
- True otherwise.
- '''
-
- j = jacobi(x, n) % n
-
- f = pow(x, n >> 1, n)
-
- if j == f: return False
- return True
-
-def randomized_primality_testing(n, k):
- '''Calculates whether n is composite (which is always correct) or
- prime (which is incorrect with error probability 2**-k)
-
- Returns False if the number is composite, and True if it's
- probably prime.
- '''
-
- # 50% of Jacobi-witnesses can report compositeness of non-prime numbers
-
- # The implemented algorithm using the Jacobi witness function has error
- # probability q <= 0.5, according to Goodrich et al.
- #
- # q = 0.5
- # t = int(math.ceil(k / log(1 / q, 2)))
- # So t = k / log(2, 2) = k / 1 = k
- # this means we can use range(k) rather than range(t)
-
- for _ in range(k):
- x = rsa.randnum.randint(n-1)
- if jacobi_witness(x, n): return False
-
- return True
-
-def is_prime(number):
- '''Returns True if the number is prime, and False otherwise.
-
- >>> is_prime(42)
- False
- >>> is_prime(41)
- True
- '''
-
- return randomized_primality_testing(number, 6)
-
-def getprime(nbits):
- '''Returns a prime number that can be stored in 'nbits' bits.
-
- >>> p = getprime(128)
- >>> is_prime(p-1)
- False
- >>> is_prime(p)
- True
- >>> is_prime(p+1)
- False
-
- >>> from rsa import common
- >>> common.bit_size(p) == 128
- True
-
- '''
-
- while True:
- integer = rsa.randnum.read_random_int(nbits)
-
- # Make sure it's odd
- integer |= 1
-
- # Test for primeness
- if is_prime(integer):
- return integer
-
- # Retry if not prime
-
-
-def are_relatively_prime(a, b):
- '''Returns True if a and b are relatively prime, and False if they
- are not.
-
- >>> are_relatively_prime(2, 3)
- True
- >>> are_relatively_prime(2, 4)
- False
- '''
-
- d = gcd(a, b)
- return (d == 1)
-
-if __name__ == '__main__':
- print('Running doctests 1000x or until failure')
- import doctest
-
- for count in range(1000):
- (failures, tests) = doctest.testmod()
- if failures:
- break
-
- if count and count % 100 == 0:
- print('%i times' % count)
-
- print('Doctests done')
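The jacobi() function above can be cross-checked against Euler's criterion: for an odd prime b, the Jacobi symbol (a/b) equals a**((b-1)/2) mod b, with the residue b-1 read as -1. A quick sanity check::

    from rsa.prime import jacobi

    b_ = 23  # a small odd prime
    for a in range(1, b_):
        euler = pow(a, (b_ - 1) // 2, b_)        # 1 or b_ - 1 when b_ is prime
        expected = -1 if euler == b_ - 1 else euler
        assert jacobi(a, b_) == expected
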
diff --git a/python-packages/rsa/randnum.py b/python-packages/rsa/randnum.py
deleted file mode 100644
index 0e782744c0..0000000000
--- a/python-packages/rsa/randnum.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2011 Sybren A. StĂĽvel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''Functions for generating random numbers.'''
-
-# Source inspired by code by Yesudeep Mangalapilly
-
-import os
-
-from rsa import common, transform
-from rsa._compat import byte
-
-def read_random_bits(nbits):
- '''Reads 'nbits' random bits.
-
- If nbits isn't a whole number of bytes, an extra byte is prepended with
- only the lower bits set.
- '''
-
- nbytes, rbits = divmod(nbits, 8)
-
- # Get the random bytes
- randomdata = os.urandom(nbytes)
-
- # Add the remaining random bits
- if rbits > 0:
- randomvalue = ord(os.urandom(1))
- randomvalue >>= (8 - rbits)
- randomdata = byte(randomvalue) + randomdata
-
- return randomdata
-
-
-def read_random_int(nbits):
- '''Reads a random integer of exactly nbits bits; the top bit is always set.
- '''
-
- randomdata = read_random_bits(nbits)
- value = transform.bytes2int(randomdata)
-
- # Ensure that the number is large enough to just fill out the required
- # number of bits.
- value |= 1 << (nbits - 1)
-
- return value
-
-def randint(maxvalue):
- '''Returns a random integer x with 1 <= x <= maxvalue
-
- May take a very long time in specific situations. If maxvalue needs N bits
- to store, the closer maxvalue is to (2 ** N) - 1, the faster this function
- is.
- '''
-
- bit_size = common.bit_size(maxvalue)
-
- tries = 0
- while True:
- value = read_random_int(bit_size)
- if value <= maxvalue:
- break
-
- if tries and tries % 10 == 0:
- # After many tries that produce the right number of bits but are
- # still larger than maxvalue, decrease the number of bits by 1. That
- # dramatically increases the chance of drawing a small enough number.
- bit_size -= 1
- tries += 1
-
- return value
-
-
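Two properties of the code above worth spelling out: read_random_int() forces the top bit, so the result has exactly nbits significant bits, and randint() retries until the draw is no larger than maxvalue::

    from rsa import common
    from rsa.randnum import read_random_int, randint

    value = read_random_int(128)
    assert common.bit_size(value) == 128  # the top bit is always set

    x = randint(1000)
    assert 1 <= x <= 1000
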
diff --git a/python-packages/rsa/transform.py b/python-packages/rsa/transform.py
deleted file mode 100644
index c740b2d275..0000000000
--- a/python-packages/rsa/transform.py
+++ /dev/null
@@ -1,220 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2011 Sybren A. StĂĽvel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''Data transformation functions.
-
-From bytes to a number, number to bytes, etc.
-'''
-
-from __future__ import absolute_import
-
-try:
- # We'll use psyco if available on 32-bit architectures to speed up code.
- # Using psyco (if available) cuts down the execution time on Python 2.5
- # at least by half.
- import psyco
- psyco.full()
-except ImportError:
- pass
-
-import binascii
-from struct import pack
-from rsa import common
-from rsa._compat import is_integer, b, byte, get_word_alignment, ZERO_BYTE, EMPTY_BYTE
-
-
-def bytes2int(raw_bytes):
- r'''Converts a list of bytes or an 8-bit string to an integer.
-
- When using unicode strings, encode them to bytes (e.g. UTF-8) first.
-
- >>> (((128 * 256) + 64) * 256) + 15
- 8405007
- >>> bytes2int('\x80@\x0f')
- 8405007
-
- '''
-
- return int(binascii.hexlify(raw_bytes), 16)
-
-
-def _int2bytes(number, block_size=None):
- r'''Converts a number to a string of bytes.
-
- Usage::
-
- >>> _int2bytes(123456789)
- '\x07[\xcd\x15'
- >>> bytes2int(_int2bytes(123456789))
- 123456789
-
- >>> _int2bytes(123456789, 6)
- '\x00\x00\x07[\xcd\x15'
- >>> bytes2int(_int2bytes(123456789, 128))
- 123456789
-
- >>> _int2bytes(123456789, 3)
- Traceback (most recent call last):
- ...
- OverflowError: Needed 4 bytes for number, but block size is 3
-
- @param number: the number to convert
- @param block_size: the number of bytes to output. If the number encoded to
- bytes is less than this, the block will be zero-padded. When not given,
- the returned block is not padded.
-
- @throws OverflowError when block_size is given and the number takes up more
- bytes than fit into the block.
- '''
- # Type checking
- if not is_integer(number):
- raise TypeError("You must pass an integer for 'number', not %s" %
- number.__class__)
-
- if number < 0:
- raise ValueError('Negative numbers cannot be used: %i' % number)
-
- # Do some bounds checking
- if number == 0:
- needed_bytes = 1
- raw_bytes = [ZERO_BYTE]
- else:
- needed_bytes = common.byte_size(number)
- raw_bytes = []
-
- # You cannot compare None > 0 in Python 3.x. It will fail with a TypeError.
- if block_size and block_size > 0:
- if needed_bytes > block_size:
- raise OverflowError('Needed %i bytes for number, but block size '
- 'is %i' % (needed_bytes, block_size))
-
- # Convert the number to bytes.
- while number > 0:
- raw_bytes.insert(0, byte(number & 0xFF))
- number >>= 8
-
- # Pad with zeroes to fill the block
- if block_size and block_size > 0:
- padding = (block_size - needed_bytes) * ZERO_BYTE
- else:
- padding = EMPTY_BYTE
-
- return padding + EMPTY_BYTE.join(raw_bytes)
-
-
-def bytes_leading(raw_bytes, needle=ZERO_BYTE):
- '''
- Finds the number of prefixed byte occurrences in the haystack.
-
- Useful when you want to deal with padding.
-
- :param raw_bytes:
- Raw bytes.
- :param needle:
- The byte to count. Default \000.
- :returns:
- The number of leading needle bytes.
- '''
- leading = 0
- # Indexing keeps compatibility between Python 2.x and Python 3.x
- _byte = needle[0]
- for x in raw_bytes:
- if x == _byte:
- leading += 1
- else:
- break
- return leading
-
-
-def int2bytes(number, fill_size=None, chunk_size=None, overflow=False):
- '''
- Convert an unsigned integer to bytes (base-256 representation).
-
- Does not preserve leading zeros if you don't specify a chunk size or
- fill size.
-
- .. NOTE:
- You must not specify both fill_size and chunk_size. Only one
- of them is allowed.
-
- :param number:
- Integer value
- :param fill_size:
- If the optional fill size is given the length of the resulting
- byte string is expected to be the fill size and will be padded
- with prefix zero bytes to satisfy that length.
- :param chunk_size:
- If optional chunk size is given and greater than zero, pad the front of
- the byte string with binary zeros so that the length is a multiple of
- ``chunk_size``.
- :param overflow:
- ``False`` (default). If this is ``True``, no ``OverflowError``
- will be raised when the fill_size is shorter than the length
- of the generated byte sequence. Instead the byte sequence will
- be returned as is.
- :returns:
- Raw bytes (base-256 representation).
- :raises:
- ``OverflowError`` when fill_size is given and the number takes up more
- bytes than fit into the block. This is raised only when the ``overflow``
- argument is set to ``False``; otherwise, no error is raised.
- '''
- if number < 0:
- raise ValueError("Number must be an unsigned integer: %d" % number)
-
- if fill_size and chunk_size:
- raise ValueError("You can either fill or pad chunks, but not both")
-
- # Fail early with a TypeError if number is not integer-like
- # (bitwise AND is only defined for integers).
- number & 1
-
- raw_bytes = b('')
-
- # Pack the integer one machine word at a time into bytes.
- num = number
- word_bits, _, max_uint, pack_type = get_word_alignment(num)
- pack_format = ">%s" % pack_type
- while num > 0:
- raw_bytes = pack(pack_format, num & max_uint) + raw_bytes
- num >>= word_bits
- # Obtain the index of the first non-zero byte.
- zero_leading = bytes_leading(raw_bytes)
- if number == 0:
- raw_bytes = ZERO_BYTE
- # De-padding.
- raw_bytes = raw_bytes[zero_leading:]
-
- length = len(raw_bytes)
- if fill_size and fill_size > 0:
- if not overflow and length > fill_size:
- raise OverflowError(
- "Need %d bytes for number, but fill size is %d" %
- (length, fill_size)
- )
- raw_bytes = raw_bytes.rjust(fill_size, ZERO_BYTE)
- elif chunk_size and chunk_size > 0:
- remainder = length % chunk_size
- if remainder:
- padding_size = chunk_size - remainder
- raw_bytes = raw_bytes.rjust(length + padding_size, ZERO_BYTE)
- return raw_bytes
-
-
-if __name__ == '__main__':
- import doctest
- doctest.testmod()
-
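A round trip through the conversion functions above, using Python 2 byte strings::

    from rsa.transform import bytes2int, int2bytes

    assert bytes2int('\x80@\x0f') == 8405007
    assert int2bytes(8405007) == '\x80@\x0f'

    # fill_size left-pads with NUL bytes; overflow=True suppresses the
    # OverflowError and returns the unpadded bytes instead.
    assert int2bytes(8405007, fill_size=6) == '\x00\x00\x00\x80@\x0f'
    assert int2bytes(8405007, fill_size=2, overflow=True) == '\x80@\x0f'
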
diff --git a/python-packages/rsa/util.py b/python-packages/rsa/util.py
deleted file mode 100644
index 307bda5d22..0000000000
--- a/python-packages/rsa/util.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2011 Sybren A. StĂĽvel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''Utility functions.'''
-
-from __future__ import with_statement
-
-import sys
-from optparse import OptionParser
-
-import rsa.key
-
-def private_to_public():
- '''Reads a private key and outputs the corresponding public key.'''
-
- # Parse the CLI options
- parser = OptionParser(usage='usage: %prog [options]',
- description='Reads a private key and outputs the '
- 'corresponding public key. Both private and public keys use '
- 'the format described in PKCS#1 v1.5')
-
- parser.add_option('-i', '--input', dest='infilename', type='string',
- help='Input filename. Reads from stdin if not specified')
- parser.add_option('-o', '--output', dest='outfilename', type='string',
- help='Output filename. Writes to stdout if not specified')
-
- parser.add_option('--inform', dest='inform',
- help='key format of input - default PEM',
- choices=('PEM', 'DER'), default='PEM')
-
- parser.add_option('--outform', dest='outform',
- help='key format of output - default PEM',
- choices=('PEM', 'DER'), default='PEM')
-
- (cli, cli_args) = parser.parse_args(sys.argv)
-
- # Read the input data
- if cli.infilename:
- print >>sys.stderr, 'Reading private key from %s in %s format' % \
- (cli.infilename, cli.inform)
- with open(cli.infilename) as infile:
- in_data = infile.read()
- else:
- print >>sys.stderr, 'Reading private key from stdin in %s format' % \
- cli.inform
- in_data = sys.stdin.read()
-
-
- # Take the public fields and create a public key
- priv_key = rsa.key.PrivateKey.load_pkcs1(in_data, cli.inform)
- pub_key = rsa.key.PublicKey(priv_key.n, priv_key.e)
-
- # Save to the output file
- out_data = pub_key.save_pkcs1(cli.outform)
-
- if cli.outfilename:
- print >>sys.stderr, 'Writing public key to %s in %s format' % \
- (cli.outfilename, cli.outform)
- with open(cli.outfilename, 'w') as outfile:
- outfile.write(out_data)
- else:
- print >>sys.stderr, 'Writing public key to stdout in %s format' % \
- cli.outform
- sys.stdout.write(out_data)
-
-
diff --git a/python-packages/rsa/varblock.py b/python-packages/rsa/varblock.py
deleted file mode 100644
index c7d96ae6a7..0000000000
--- a/python-packages/rsa/varblock.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2011 Sybren A. StĂĽvel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''VARBLOCK file support
-
-The VARBLOCK file format is as follows, where || denotes byte concatenation:
-
- FILE := VERSION || BLOCK || BLOCK ...
-
- BLOCK := LENGTH || DATA
-
- LENGTH := varint-encoded length of the subsequent data. Varint comes from
- Google Protobuf, and encodes an integer into a variable number of bytes.
- Each byte uses the 7 lowest bits to encode the value. The highest bit set
- to 1 indicates the next byte is also part of the varint. The last byte will
- have this bit set to 0.
-
-This file format is called the VARBLOCK format, in line with the varint format
-used to denote the block sizes.
-
-'''
-
-from rsa._compat import byte, b
-
-
-ZERO_BYTE = b('\x00')
-VARBLOCK_VERSION = 1
-
-def read_varint(infile):
- '''Reads a varint from the file.
-
- When the first byte to be read indicates EOF, (0, 0) is returned. When
- EOF occurs after at least one byte has been read, an EOFError exception
- is raised.
-
- @param infile: the file-like object to read from. It should have a read()
- method.
- @returns (varint, length), the read varint and the number of read bytes.
- '''
-
- varint = 0
- read_bytes = 0
-
- while True:
- char = infile.read(1)
- if len(char) == 0:
- if read_bytes == 0:
- return (0, 0)
- raise EOFError('EOF while reading varint, value is %i so far' %
- varint)
-
- byte = ord(char)
- varint += (byte & 0x7F) << (7 * read_bytes)
-
- read_bytes += 1
-
- if not byte & 0x80:
- return (varint, read_bytes)
-
-
-def write_varint(outfile, value):
- '''Writes a varint to a file.
-
- @param outfile: the file-like object to write to. It should have a write()
- method.
- @returns the number of written bytes.
- '''
-
- # there is a big difference between 'write the value 0' (this case) and
- # 'there is nothing left to write' (the false-case of the while loop)
-
- if value == 0:
- outfile.write(ZERO_BYTE)
- return 1
-
- written_bytes = 0
- while value > 0:
- to_write = value & 0x7f
- value = value >> 7
-
- if value > 0:
- to_write |= 0x80
-
- outfile.write(byte(to_write))
- written_bytes += 1
-
- return written_bytes
-
-
-def yield_varblocks(infile):
- '''Generator, yields each block in the input file.
-
- @param infile: file to read, is expected to have the VARBLOCK format as
- described in the module's docstring.
- @yields the contents of each block.
- '''
-
- # Check the version number
- first_char = infile.read(1)
- if len(first_char) == 0:
- raise EOFError('Unable to read VARBLOCK version number')
-
- version = ord(first_char)
- if version != VARBLOCK_VERSION:
- raise ValueError('VARBLOCK version %i not supported' % version)
-
- while True:
- (block_size, read_bytes) = read_varint(infile)
-
- # EOF at block boundary, that's fine.
- if read_bytes == 0 and block_size == 0:
- break
-
- block = infile.read(block_size)
-
- read_size = len(block)
- if read_size != block_size:
- raise EOFError('Block size is %i, but could read only %i bytes' %
- (block_size, read_size))
-
- yield block
-
-
-def yield_fixedblocks(infile, blocksize):
- '''Generator, yields each block of ``blocksize`` bytes in the input file.
-
- :param infile: file to read and separate in blocks.
- :returns: a generator that yields the contents of each block
- '''
-
- while True:
- block = infile.read(blocksize)
-
- read_bytes = len(block)
- if read_bytes == 0:
- break
-
- yield block
-
- if read_bytes < blocksize:
- break
-
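The varint codec above round-trips; a quick sketch with an in-memory file (Python 2's StringIO)::

    from StringIO import StringIO
    from rsa.varblock import write_varint, read_varint

    for value in (0, 1, 127, 128, 300, 2 ** 20):
        buf = StringIO()
        written = write_varint(buf, value)
        buf.seek(0)
        assert read_varint(buf) == (value, written)
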
diff --git a/python-packages/securesync/engine/__init__.py b/python-packages/securesync/engine/__init__.py
deleted file mode 100755
index e69de29bb2..0000000000
diff --git a/python-packages/securesync/management/__init__.py b/python-packages/securesync/management/__init__.py
deleted file mode 100755
index e69de29bb2..0000000000
diff --git a/python-packages/securesync/management/commands/__init__.py b/python-packages/securesync/management/commands/__init__.py
deleted file mode 100755
index e69de29bb2..0000000000
diff --git a/python-packages/securesync/migrations/__init__.py b/python-packages/securesync/migrations/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/securesync/settings.py b/python-packages/securesync/settings.py
deleted file mode 100644
index 7d39443496..0000000000
--- a/python-packages/securesync/settings.py
+++ /dev/null
@@ -1,19 +0,0 @@
-try:
- from kalite import local_settings
-except ImportError:
- local_settings = object()
-
-#######################
-# Set module settings
-#######################
-
-SYNCING_THROTTLE_WAIT_TIME = getattr(local_settings, "SYNCING_THROTTLE_WAIT_TIME", None) # default: don't throttle syncing
-
-SYNCING_MAX_RECORDS_PER_REQUEST = getattr(local_settings, "SYNCING_MAX_RECORDS_PER_REQUEST", 100) # 100 records per http request
-
-# Here, None means no limit
-SYNC_SESSIONS_MAX_RECORDS = getattr(local_settings, "SYNC_SESSIONS_MAX_RECORDS", 10)
-
-SHOW_DELETED_OBJECTS = getattr(local_settings, "SHOW_DELETED_OBJECTS", False)
-
-DEBUG_ALLOW_DELETIONS = getattr(local_settings, "DEBUG_ALLOW_DELETIONS", False)
diff --git a/python-packages/slugify/__init__.py b/python-packages/slugify/__init__.py
deleted file mode 100644
index a0b22498fb..0000000000
--- a/python-packages/slugify/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# -*- coding: utf-8 -*-
-
-__version__ = '0.1.0'
-
-from slugify import *
diff --git a/python-packages/slugify/slugify.py b/python-packages/slugify/slugify.py
deleted file mode 100644
index ba152746dc..0000000000
--- a/python-packages/slugify/slugify.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# -*- coding: utf-8 -*-
-
-__all__ = ['slugify']
-
-import re
-import unicodedata
-import types
-import sys
-from htmlentitydefs import name2codepoint
-from unidecode import unidecode
-
-# character entity reference
-CHAR_ENTITY_REXP = re.compile('&(%s);' % '|'.join(name2codepoint))
-
-# decimal character reference, e.g. &#123;
-DECIMAL_REXP = re.compile('&#(\d+);')
-
-# hexadecimal character reference, e.g. &#x7b;
-HEX_REXP = re.compile('&#x([\da-fA-F]+);')
-
-REPLACE1_REXP = re.compile(r'[\']+')
-REPLACE2_REXP = re.compile(r'[^-a-z0-9]+')
-REMOVE_REXP = re.compile('-{2,}')
-
-
-def smart_truncate(string, max_length=0, word_boundaries=False, separator=' '):
- """ Truncate a string """
-
- string = string.strip(separator)
-
- if not max_length:
- return string
-
- if len(string) < max_length:
- return string
-
- if not word_boundaries:
- return string[:max_length].strip(separator)
-
- if separator not in string:
- return string[:max_length]
-
- truncated = ''
- for word in string.split(separator):
- if word:
- next_len = len(truncated) + len(word) + len(separator)
- if next_len <= max_length:
- truncated += '{0}{1}'.format(word, separator)
- if not truncated:
- truncated = string[:max_length]
- return truncated.strip(separator)
-
-
-def slugify(text, entities=True, decimal=True, hexadecimal=True, max_length=0, word_boundary=False, separator='-'):
- """ Make a slug from the given text """
-
- # text to unicode
- if not isinstance(text, types.UnicodeType):
- text = unicode(text, 'utf-8', 'ignore')
-
- # decode unicode ( 影師嗎 = Ying Shi Ma)
- text = unidecode(text)
-
- # text back to unicode
- if not isinstance(text, types.UnicodeType):
- text = unicode(text, 'utf-8', 'ignore')
-
- # character entity reference
- if entities:
- text = CHAR_ENTITY_REXP.sub(lambda m: unichr(name2codepoint[m.group(1)]), text)
-
- # decimal character reference
- if decimal:
- try:
- text = DECIMAL_REXP.sub(lambda m: unichr(int(m.group(1))), text)
- except:
- pass
-
- # hexadecimal character reference
- if hexadecimal:
- try:
- text = HEX_REXP.sub(lambda m: unichr(int(m.group(1), 16)), text)
- except:
- pass
-
- # translate
- text = unicodedata.normalize('NFKD', text)
- if sys.version_info < (3,):
- text = text.encode('ascii', 'ignore')
-
- # replace unwanted characters
- text = REPLACE1_REXP.sub('', text.lower()) # remove apostrophes outright instead of replacing them with '-'
- text = REPLACE2_REXP.sub('-', text)
-
- # remove redundant -
- text = REMOVE_REXP.sub('-', text).strip('-')
-
- # smart truncate if requested
- if max_length > 0:
- text = smart_truncate(text, max_length, word_boundary, '-')
-
- if separator != '-':
- text = text.replace('-', separator)
-
- return text
-
-
-def main():
- if len(sys.argv) < 2:
- print "Usage %s TEXT TO SLUGIFY" % sys.argv[0]
- return
- text = ' '.join(sys.argv[1:])
- print slugify(text)
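Two example calls tracing the pipeline above (lowercase, collapse everything outside [-a-z0-9] to the separator, optionally truncate on word boundaries); the expected outputs were worked out by hand from the code::

    assert slugify('This is a Test -- HELLO, world!') == 'this-is-a-test-hello-world'
    assert slugify('one two three', max_length=9, word_boundary=True) == 'one-two'
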
diff --git a/python-packages/smmap/__init__.py b/python-packages/smmap/__init__.py
deleted file mode 100644
index a10cd5c99d..0000000000
--- a/python-packages/smmap/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-"""Intialize the smmap package"""
-
-__author__ = "Sebastian Thiel"
-__contact__ = "byronimo@gmail.com"
-__homepage__ = "https://github.com/Byron/smmap"
-version_info = (0, 8, 2)
-__version__ = '.'.join(str(i) for i in version_info)
-
-# make everything available in root package for convenience
-from mman import *
-from buf import *
diff --git a/python-packages/smmap/buf.py b/python-packages/smmap/buf.py
deleted file mode 100644
index 255c6b54d8..0000000000
--- a/python-packages/smmap/buf.py
+++ /dev/null
@@ -1,134 +0,0 @@
-"""Module with a simple buffer implementation using the memory manager"""
-from mman import WindowCursor
-
-import sys
-
-__all__ = ["SlidingWindowMapBuffer"]
-
-class SlidingWindowMapBuffer(object):
- """A buffer like object which allows direct byte-wise object and slicing into
- memory of a mapped file. The mapping is controlled by the provided cursor.
-
- The buffer is relative, that is if you map an offset, index 0 will map to the
- first byte at the offset you used during initialization or begin_access
-
- **Note:** Although this type effectively hides the fact that there are mapped windows
- underneath, it can unfortunately not be used in any non-pure python method which
- needs a buffer or string"""
- __slots__ = (
- '_c', # our cursor
- '_size', # our supposed size
- )
-
-
- def __init__(self, cursor = None, offset = 0, size = sys.maxint, flags = 0):
-        """Initialize the instance to operate on the given cursor.
-        :param cursor: if not None, the associated cursor to the file you want to access
-            If None, you have to call begin_access before using the buffer and provide a cursor
-        :param offset: absolute offset in bytes
-        :param size: the total size of the mapping. Defaults to the maximum possible size.
-            From that point on, the __len__ of the buffer will be the given size or the file size.
-            If the size is larger than the mappable area, you can only access the actually available
-            area, although the length of the buffer is reported to be your given size.
-            Hence it is in your own interest to provide a proper size!
-        :param flags: Additional flags to be passed to os.open
-        :raise ValueError: if the buffer could not achieve a valid state"""
- self._c = cursor
- if cursor and not self.begin_access(cursor, offset, size, flags):
- raise ValueError("Failed to allocate the buffer - probably the given offset is out of bounds")
- # END handle offset
-
- def __del__(self):
- self.end_access()
-
- def __len__(self):
- return self._size
-
- def __getitem__(self, i):
- c = self._c
- assert c.is_valid()
- if i < 0:
- i = self._size + i
- if not c.includes_ofs(i):
- c.use_region(i, 1)
- # END handle region usage
- return c.buffer()[i-c.ofs_begin()]
-
- def __getslice__(self, i, j):
- c = self._c
-        # fast path, slice fully included - saves a concatenation operation and
- # should be the default
- assert c.is_valid()
- if i < 0:
- i = self._size + i
- if j == sys.maxint:
- j = self._size
- if j < 0:
- j = self._size + j
- if (c.ofs_begin() <= i) and (j < c.ofs_end()):
- b = c.ofs_begin()
- return c.buffer()[i-b:j-b]
- else:
- l = j-i # total length
- ofs = i
-            # Keeping tokens in a list could possibly be faster, but the list
-            # overhead outweighs the benefits (tested)!
- md = str()
- while l:
- c.use_region(ofs, l)
- assert c.is_valid()
- d = c.buffer()[:l]
- ofs += len(d)
- l -= len(d)
- md += d
- #END while there are bytes to read
- return md
- # END fast or slow path
- #{ Interface
-
- def begin_access(self, cursor = None, offset = 0, size = sys.maxint, flags = 0):
-        """Call this before the first use of this instance. The method was already
-        called by the constructor in case sufficient information was provided.
-
-        For more information on the parameters, see the __init__ method.
-        :param cursor: if None, the currently set cursor will be used
-        :return: True if the buffer can be used"""
- if cursor:
- self._c = cursor
- #END update our cursor
-
- # reuse existing cursors if possible
- if self._c is not None and self._c.is_associated():
- res = self._c.use_region(offset, size, flags).is_valid()
- if res:
-                # if the given size is too large or the default, we compute a proper size.
-                # If it's smaller, we assume the combination of offset and size
-                # as chosen by the user is correct and use it!
-                # If not, the user is in trouble.
- if size > self._c.file_size():
- size = self._c.file_size() - offset
- #END handle size
- self._size = size
- #END set size
- return res
- # END use our cursor
- return False
-
- def end_access(self):
- """Call this method once you are done using the instance. It is automatically
- called on destruction, and should be called just in time to allow system
- resources to be freed.
-
-        Once you have called end_access, you must call begin_access before reusing this instance!"""
- self._size = 0
- if self._c is not None:
- self._c.unuse_region()
- #END unuse region
-
- def cursor(self):
- """:return: the currently set cursor which provides access to the data"""
- return self._c
-
- #}END interface
-
-
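Taken together, the docstrings above imply the following life cycle; this is a hedged sketch against the deleted API, with an illustrative file path::

    from smmap import SlidingWindowMapManager, SlidingWindowMapBuffer

    man = SlidingWindowMapManager()              # manager with a default window size
    c = man.make_cursor('/path/to/large.file')   # associated, but not yet mapping a region
    buf = SlidingWindowMapBuffer(c)              # the constructor calls begin_access for us
    try:
        first = buf[0]        # byte-wise access maps a window on demand
        header = buf[0:128]   # slicing may stitch data together from several windows
    finally:
        buf.end_access()      # unuse the region so the manager can recycle it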
diff --git a/python-packages/smmap/exc.py b/python-packages/smmap/exc.py
deleted file mode 100644
index f0ed7dcd84..0000000000
--- a/python-packages/smmap/exc.py
+++ /dev/null
@@ -1,7 +0,0 @@
-"""Module with system exceptions"""
-
-class MemoryManagerError(Exception):
- """Base class for all exceptions thrown by the memory manager"""
-
-class RegionCollectionError(MemoryManagerError):
- """Thrown if a memory region could not be collected, or if no region for collection was found"""
diff --git a/python-packages/smmap/mman.py b/python-packages/smmap/mman.py
deleted file mode 100644
index 97c42c5bb4..0000000000
--- a/python-packages/smmap/mman.py
+++ /dev/null
@@ -1,581 +0,0 @@
-"""Module containnig a memory memory manager which provides a sliding window on a number of memory mapped files"""
-from util import (
- MapWindow,
- MapRegion,
- MapRegionList,
- is_64_bit,
- align_to_mmap
- )
-
-from weakref import ref
-import sys
-from sys import getrefcount
-
-__all__ = ["StaticWindowMapManager", "SlidingWindowMapManager", "WindowCursor"]
-#{ Utilities
-
-#}END utilities
-
-
-class WindowCursor(object):
- """
- Pointer into the mapped region of the memory manager, keeping the map
- alive until it is destroyed and no other client uses it.
-
- Cursors should not be created manually, but are instead returned by the SlidingWindowMapManager
-
-    **Note:** The current implementation is suited for both the static and the sliding window
-    manager, even though the two differ considerably. It could be improved, but
-    I see no real need to do so."""
- __slots__ = (
-        '_manager', # the manager keeping all file regions
- '_rlist', # a regions list with regions for our file
- '_region', # our current region or None
- '_ofs', # relative offset from the actually mapped area to our start area
- '_size' # maximum size we should provide
- )
-
- def __init__(self, manager = None, regions = None):
- self._manager = manager
- self._rlist = regions
- self._region = None
- self._ofs = 0
- self._size = 0
-
- def __del__(self):
- self._destroy()
-
- def _destroy(self):
- """Destruction code to decrement counters"""
- self.unuse_region()
-
- if self._rlist is not None:
- # Actual client count, which doesn't include the reference kept by the manager, nor ours
- # as we are about to be deleted
- try:
- num_clients = self._rlist.client_count() - 2
- if num_clients == 0 and len(self._rlist) == 0:
- # Free all resources associated with the mapped file
- self._manager._fdict.pop(self._rlist.path_or_fd())
- # END remove regions list from manager
- except TypeError:
-                # sometimes, during shutdown, getrefcount is None. It's possible
-                # to re-import it; however, it's probably better to just ignore
-                # this python problem (for now).
-                # The next step is to get rid of the error-prone getrefcount altogether.
- pass
- #END exception handling
- #END handle regions
-
- def _copy_from(self, rhs):
- """Copy all data from rhs into this instance, handles usage count"""
- self._manager = rhs._manager
- self._rlist = rhs._rlist
- self._region = rhs._region
- self._ofs = rhs._ofs
- self._size = rhs._size
-
- if self._region is not None:
- self._region.increment_usage_count()
- # END handle regions
-
- def __copy__(self):
- """copy module interface"""
- cpy = type(self)()
- cpy._copy_from(self)
- return cpy
-
- #{ Interface
- def assign(self, rhs):
- """Assign rhs to this instance. This is required in order to get a real copy.
-        Alternatively, you can copy an existing instance using the copy module"""
- self._destroy()
- self._copy_from(rhs)
-
- def use_region(self, offset = 0, size = 0, flags = 0):
- """Assure we point to a window which allows access to the given offset into the file
-
- :param offset: absolute offset in bytes into the file
- :param size: amount of bytes to map. If 0, all available bytes will be mapped
- :param flags: additional flags to be given to os.open in case a file handle is initially opened
- for mapping. Has no effect if a region can actually be reused.
- :return: this instance - it should be queried for whether it points to a valid memory region.
-            This is not the case if the mapping failed because we reached the end of the file
-
-        **Note:** The size actually mapped may be smaller than the given size. If that is the case,
- either the file has reached its end, or the map was created between two existing regions"""
- need_region = True
- man = self._manager
- fsize = self._rlist.file_size()
- size = min(size or fsize, man.window_size() or fsize) # clamp size to window size
-
- if self._region is not None:
- if self._region.includes_ofs(offset):
- need_region = False
- else:
- self.unuse_region()
- # END handle existing region
- # END check existing region
-
- # offset too large ?
- if offset >= fsize:
- return self
- #END handle offset
-
- if need_region:
- self._region = man._obtain_region(self._rlist, offset, size, flags, False)
- #END need region handling
-
- self._region.increment_usage_count()
- self._ofs = offset - self._region._b
- self._size = min(size, self._region.ofs_end() - offset)
-
- return self
-
- def unuse_region(self):
-        """Unuse the current region. Does nothing if we have no current region
-
- **Note:** the cursor unuses the region automatically upon destruction. It is recommended
- to unuse the region once you are done reading from it in persistent cursors as it
-        helps to free up resources more quickly"""
- self._region = None
-        # note: we should reset ofs and size, but we spare that for performance. It's not
-        # allowed to query information if we are not valid!
-
- def buffer(self):
- """Return a buffer object which allows access to our memory region from our offset
- to the window size. Please note that it might be smaller than you requested when calling use_region()
-
- **Note:** You can only obtain a buffer if this instance is_valid() !
-
-        **Note:** buffers should not be cached beyond the duration of your access, as this will
-        prevent resources from being freed even though they might not be accounted for anymore!"""
- return buffer(self._region.buffer(), self._ofs, self._size)
-
- def map(self):
- """
-        :return: the underlying raw memory map. Please note that its offset and size are likely
-        to differ from what you set. Use it only if you are sure about the region it maps, which is
-        the whole file in case of StaticWindowMapManager"""
- return self._region.map()
-
- def is_valid(self):
- """:return: True if we have a valid and usable region"""
- return self._region is not None
-
- def is_associated(self):
- """:return: True if we are associated with a specific file already"""
- return self._rlist is not None
-
- def ofs_begin(self):
- """:return: offset to the first byte pointed to by our cursor
-
- **Note:** only if is_valid() is True"""
- return self._region._b + self._ofs
-
- def ofs_end(self):
- """:return: offset to one past the last available byte"""
- # unroll method calls for performance !
- return self._region._b + self._ofs + self._size
-
- def size(self):
- """:return: amount of bytes we point to"""
- return self._size
-
- def region_ref(self):
- """:return: weak ref to our mapped region.
- :raise AssertionError: if we have no current region. This is only useful for debugging"""
- if self._region is None:
- raise AssertionError("region not set")
- return ref(self._region)
-
- def includes_ofs(self, ofs):
-        """:return: True if the given absolute offset is contained in the cursor's
- current region
-
- **Note:** cursor must be valid for this to work"""
- # unroll methods
- return (self._region._b + self._ofs) <= ofs < (self._region._b + self._ofs + self._size)
-
- def file_size(self):
- """:return: size of the underlying file"""
- return self._rlist.file_size()
-
- def path_or_fd(self):
-        """:return: path or file descriptor of the underlying mapped file"""
- return self._rlist.path_or_fd()
-
- def path(self):
- """:return: path of the underlying mapped file
- :raise ValueError: if attached path is not a path"""
- if isinstance(self._rlist.path_or_fd(), int):
- raise ValueError("Path queried although mapping was applied to a file descriptor")
- # END handle type
- return self._rlist.path_or_fd()
-
- def fd(self):
- """:return: file descriptor used to create the underlying mapping.
-
- **Note:** it is not required to be valid anymore
- :raise ValueError: if the mapping was not created by a file descriptor"""
- if isinstance(self._rlist.path_or_fd(), basestring):
- raise ValueError("File descriptor queried although mapping was generated from path")
- #END handle type
- return self._rlist.path_or_fd()
-
- #} END interface
-
-
-class StaticWindowMapManager(object):
- """Provides a manager which will produce single size cursors that are allowed
- to always map the whole file.
-
- Clients must be written to specifically know that they are accessing their data
- through a StaticWindowMapManager, as they otherwise have to deal with their window size.
-
- These clients would have to use a SlidingWindowMapBuffer to hide this fact.
-
-    This type will always use a maximum window size, and optimizes certain methods to
-    accommodate this fact"""
-
- __slots__ = [
-        '_fdict', # mapping of path -> StorageHelper (of some kind)
-        '_window_size', # maximum size of a window
-        '_max_memory_size', # maximum amount of memory we may allocate
- '_max_handle_count', # maximum amount of handles to keep open
- '_memory_size', # currently allocated memory size
- '_handle_count', # amount of currently allocated file handles
- ]
-
- #{ Configuration
- MapRegionListCls = MapRegionList
- MapWindowCls = MapWindow
- MapRegionCls = MapRegion
- WindowCursorCls = WindowCursor
- #} END configuration
-
- _MB_in_bytes = 1024 * 1024
-
- def __init__(self, window_size = 0, max_memory_size = 0, max_open_handles = sys.maxint):
-        """Initialize the manager with the given parameters.
-        :param window_size: if -1, a default window size will be chosen depending on
-            the operating system's architecture. It will internally be rounded to a multiple of the page size.
-            If 0, the window may have any size, which basically results in mapping the whole file at once
-        :param max_memory_size: maximum amount of memory we may map at once before releasing mapped regions.
-            If 0, a viable default will be set depending on the system's architecture.
-            It is a soft limit we try to keep, but nothing bad happens if we have to overallocate
-        :param max_open_handles: if not sys.maxint, limit the amount of open file handles to the given number.
-            Otherwise the amount is only limited by the system itself. If a system or soft limit is hit,
-            the manager will free as many handles as possible"""
- self._fdict = dict()
- self._window_size = window_size
- self._max_memory_size = max_memory_size
- self._max_handle_count = max_open_handles
- self._memory_size = 0
- self._handle_count = 0
-
- if window_size < 0:
- coeff = 32
- if is_64_bit():
- coeff = 1024
- #END handle arch
- self._window_size = coeff * self._MB_in_bytes
- # END handle max window size
-
- if max_memory_size == 0:
- coeff = 512
- if is_64_bit():
- coeff = 8192
- #END handle arch
- self._max_memory_size = coeff * self._MB_in_bytes
- #END handle max memory size
-
- #{ Internal Methods
-
- def _collect_lru_region(self, size):
-        """Unmap the region which was least-recently used and has no client
-        :param size: size of the region we want to map next (assuming it's not already mapped, partially or fully).
-            If 0, we try to free any available region
-        :return: Amount of freed regions
-
-        **Note:** We don't raise exceptions anymore, in order to keep the system working, allowing temporary overallocation.
-        If the system runs out of memory, it will tell.
-
-        **todo:** implement a case where all unused regions are discarded efficiently. Currently it's brute force only"""
- num_found = 0
- while (size == 0) or (self._memory_size + size > self._max_memory_size):
- lru_region = None
- lru_list = None
- for regions in self._fdict.itervalues():
- for region in regions:
- # check client count - consider that we keep one reference ourselves !
- if (region.client_count()-2 == 0 and
- (lru_region is None or region._uc < lru_region._uc)):
- lru_region = region
- lru_list = regions
- # END update lru_region
- #END for each region
- #END for each regions list
-
- if lru_region is None:
- break
- #END handle region not found
-
- num_found += 1
- del(lru_list[lru_list.index(lru_region)])
- self._memory_size -= lru_region.size()
- self._handle_count -= 1
- #END while there is more memory to free
- return num_found
-
- def _obtain_region(self, a, offset, size, flags, is_recursive):
-        """Utility to create a new region - for more information on the parameters,
- see MapCursor.use_region.
- :param a: A regions (a)rray
- :return: The newly created region"""
- if self._memory_size + size > self._max_memory_size:
- self._collect_lru_region(size)
- #END handle collection
-
- r = None
- if a:
- assert len(a) == 1
- r = a[0]
- else:
- try:
- r = self.MapRegionCls(a.path_or_fd(), 0, sys.maxint, flags)
- except Exception:
-                # apparently we are out of system resources or hit a limit.
-                # As many more operations are likely to fail in that condition
-                # (like reading a file from disk, etc.) we free up as much as possible.
-                # As this invalidates our insert position, we have to recurse here.
-                # NOTE: The c++ version uses a linked list to circumvent this, but
-                # using that in python is probably too slow anyway
- if is_recursive:
- # we already tried this, and still have no success in obtaining
- # a mapping. This is an exception, so we propagate it
- raise
- #END handle existing recursion
- self._collect_lru_region(0)
- return self._obtain_region(a, offset, size, flags, True)
- #END handle exceptions
-
- self._handle_count += 1
- self._memory_size += r.size()
- a.append(r)
- # END handle array
-
- assert r.includes_ofs(offset)
- return r
-
- #}END internal methods
-
- #{ Interface
- def make_cursor(self, path_or_fd):
- """
- :return: a cursor pointing to the given path or file descriptor.
- It can be used to map new regions of the file into memory
-
- **Note:** if a file descriptor is given, it is assumed to be open and valid,
- but may be closed afterwards. To refer to the same file, you may reuse
- your existing file descriptor, but keep in mind that new windows can only
-        be mapped as long as it stays valid. This is why using actual file paths
-        is preferred unless you plan to keep the file descriptor open.
-
- **Note:** file descriptors are problematic as they are not necessarily unique, as two
- different files opened and closed in succession might have the same file descriptor id.
-
- **Note:** Using file descriptors directly is faster once new windows are mapped as it
-        prevents the file from being opened again just for the purpose of mapping it."""
- regions = self._fdict.get(path_or_fd)
- if regions is None:
- regions = self.MapRegionListCls(path_or_fd)
- self._fdict[path_or_fd] = regions
- # END obtain region for path
- return self.WindowCursorCls(self, regions)
-
- def collect(self):
- """Collect all available free-to-collect mapped regions
- :return: Amount of freed handles"""
- return self._collect_lru_region(0)
-
- def num_file_handles(self):
- """:return: amount of file handles in use. Each mapped region uses one file handle"""
- return self._handle_count
-
-    def num_open_files(self):
-        """:return: amount of opened files in the system"""
-        return sum(1 for rlist in self._fdict.itervalues() if len(rlist) > 0)
-
- def window_size(self):
- """:return: size of each window when allocating new regions"""
- return self._window_size
-
- def mapped_memory_size(self):
- """:return: amount of bytes currently mapped in total"""
- return self._memory_size
-
- def max_file_handles(self):
-        """:return: maximum amount of handles we may have opened"""
- return self._max_handle_count
-
- def max_mapped_memory_size(self):
- """:return: maximum amount of memory we may allocate"""
- return self._max_memory_size
-
- #} END interface
-
- #{ Special Purpose Interface
-
- def force_map_handle_removal_win(self, base_path):
- """ONLY AVAILABLE ON WINDOWS
-        On Windows, removing files is not allowed if anybody still has them open.
- If this process is ourselves, and if the whole process uses this memory
- manager (as far as the parent framework is concerned) we can enforce
- closing all memory maps whose path matches the given base path to
- allow the respective operation after all.
- The respective system must NOT access the closed memory regions anymore !
-        This really may only be used if you know that the items which keep
-        the cursors alive will not be using them anymore. They need to be recreated!
- :return: Amount of closed handles
-
- **Note:** does nothing on non-windows platforms"""
- if sys.platform != 'win32':
- return
- #END early bailout
-
- num_closed = 0
- for path, rlist in self._fdict.iteritems():
- if path.startswith(base_path):
- for region in rlist:
- region._mf.close()
- num_closed += 1
- #END path matches
- #END for each path
- return num_closed
- #} END special purpose interface
-
-
-
-class SlidingWindowMapManager(StaticWindowMapManager):
-    """Maintains a list of ranges of mapped memory regions in one or more files and makes it easy
-    to obtain additional regions, assuring there is no overlap.
- Once a certain memory limit is reached globally, or if there cannot be more open file handles
- which result from each mmap call, the least recently used, and currently unused mapped regions
- are unloaded automatically.
-
- **Note:** currently not thread-safe !
-
- **Note:** in the current implementation, we will automatically unload windows if we either cannot
- create more memory maps (as the open file handles limit is hit) or if we have allocated more than
- a safe amount of memory already, which would possibly cause memory allocations to fail as our address
- space is full."""
-
- __slots__ = tuple()
-
- def __init__(self, window_size = -1, max_memory_size = 0, max_open_handles = sys.maxint):
- """Adjusts the default window size to -1"""
- super(SlidingWindowMapManager, self).__init__(window_size, max_memory_size, max_open_handles)
-
- def _obtain_region(self, a, offset, size, flags, is_recursive):
- # bisect to find an existing region. The c++ implementation cannot
- # do that as it uses a linked list for regions.
- r = None
- lo = 0
- hi = len(a)
- while lo < hi:
- mid = (lo+hi)//2
- ofs = a[mid]._b
- if ofs <= offset:
- if a[mid].includes_ofs(offset):
- r = a[mid]
- break
- #END have region
- lo = mid+1
- else:
- hi = mid
- #END handle position
- #END while bisecting
-
- if r is None:
- window_size = self._window_size
- left = self.MapWindowCls(0, 0)
- mid = self.MapWindowCls(offset, size)
- right = self.MapWindowCls(a.file_size(), 0)
-
-            # we want to honor the max memory size, and assure we have enough
-            # memory available
- # Save calls !
- if self._memory_size + window_size > self._max_memory_size:
- self._collect_lru_region(window_size)
- #END handle collection
-
- # we assume the list remains sorted by offset
- insert_pos = 0
- len_regions = len(a)
- if len_regions == 1:
- if a[0]._b <= offset:
- insert_pos = 1
- #END maintain sort
- else:
- # find insert position
- insert_pos = len_regions
- for i, region in enumerate(a):
- if region._b > offset:
- insert_pos = i
- break
- #END if insert position is correct
- #END for each region
- # END obtain insert pos
-
- # adjust the actual offset and size values to create the largest
- # possible mapping
- if insert_pos == 0:
- if len_regions:
- right = self.MapWindowCls.from_region(a[insert_pos])
- #END adjust right side
- else:
- if insert_pos != len_regions:
- right = self.MapWindowCls.from_region(a[insert_pos])
- # END adjust right window
- left = self.MapWindowCls.from_region(a[insert_pos - 1])
- #END adjust surrounding windows
-
- mid.extend_left_to(left, window_size)
- mid.extend_right_to(right, window_size)
- mid.align()
-
- # it can happen that we align beyond the end of the file
- if mid.ofs_end() > right.ofs:
- mid.size = right.ofs - mid.ofs
- #END readjust size
-
- # insert new region at the right offset to keep the order
- try:
- if self._handle_count >= self._max_handle_count:
- raise Exception
- #END assert own imposed max file handles
- r = self.MapRegionCls(a.path_or_fd(), mid.ofs, mid.size, flags)
- except Exception:
-                # apparently we are out of system resources or hit a limit.
-                # As many more operations are likely to fail in that condition
-                # (like reading a file from disk, etc.) we free up as much as possible.
-                # As this invalidates our insert position, we have to recurse here.
-                # NOTE: The c++ version uses a linked list to circumvent this, but
-                # using that in python is probably too slow anyway
- if is_recursive:
- # we already tried this, and still have no success in obtaining
- # a mapping. This is an exception, so we propagate it
- raise
- #END handle existing recursion
- self._collect_lru_region(0)
- return self._obtain_region(a, offset, size, flags, True)
- #END handle exceptions
-
- self._handle_count += 1
- self._memory_size += r.size()
- a.insert(insert_pos, r)
- # END create new region
- return r
-
-
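Because ``use_region()`` has to be revalidated with ``is_valid()`` after every call, a sequential scan over a file through the deleted manager would look roughly as follows (a sketch; the path is illustrative)::

    from smmap.mman import SlidingWindowMapManager

    man = SlidingWindowMapManager()
    c = man.make_cursor('/path/to/data.bin')
    ofs = 0
    while c.use_region(ofs).is_valid():   # maps a window containing ofs; fails at EOF
        chunk = c.buffer()[:]             # bytes from ofs up to the end of the window
        ofs += len(chunk)                 # the mapped size may be smaller than requested
    c.unuse_region()                      # drop the region reference explicitly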
diff --git a/python-packages/smmap/util.py b/python-packages/smmap/util.py
deleted file mode 100644
index c6710b3fec..0000000000
--- a/python-packages/smmap/util.py
+++ /dev/null
@@ -1,269 +0,0 @@
-"""Module containnig a memory memory manager which provides a sliding window on a number of memory mapped files"""
-import os
-import sys
-
-from mmap import mmap, ACCESS_READ
-try:
- from mmap import ALLOCATIONGRANULARITY
-except ImportError:
-    # in python pre 2.6, ALLOCATIONGRANULARITY does not exist, as it is mainly
-    # useful for aligning the offset - and the mmap offset argument doesn't exist there either
- from mmap import PAGESIZE as ALLOCATIONGRANULARITY
-#END handle pythons missing quality assurance
-
-from sys import getrefcount
-
-__all__ = [ "align_to_mmap", "is_64_bit",
- "MapWindow", "MapRegion", "MapRegionList", "ALLOCATIONGRANULARITY"]
-
-#{ Utilities
-
-def align_to_mmap(num, round_up):
- """
- Align the given integer number to the closest page offset, which usually is 4096 bytes.
-
- :param round_up: if True, the next higher multiple of page size is used, otherwise
- the lower page_size will be used (i.e. if True, 1 becomes 4096, otherwise it becomes 0)
- :return: num rounded to closest page"""
-    res = (num // ALLOCATIONGRANULARITY) * ALLOCATIONGRANULARITY
-    if round_up and (res != num):
-        res += ALLOCATIONGRANULARITY
-    #END handle size
-    return res
-
-def is_64_bit():
- """:return: True if the system is 64 bit. Otherwise it can be assumed to be 32 bit"""
- return sys.maxint > (1<<32) - 1
-
-#}END utilities
-
-
-#{ Utility Classes
-
-class MapWindow(object):
- """Utility type which is used to snap windows towards each other, and to adjust their size"""
- __slots__ = (
- 'ofs', # offset into the file in bytes
- 'size' # size of the window in bytes
- )
-
- def __init__(self, offset, size):
- self.ofs = offset
- self.size = size
-
- def __repr__(self):
- return "MapWindow(%i, %i)" % (self.ofs, self.size)
-
- @classmethod
- def from_region(cls, region):
- """:return: new window from a region"""
- return cls(region._b, region.size())
-
- def ofs_end(self):
- return self.ofs + self.size
-
- def align(self):
- """Assures the previous window area is contained in the new one"""
- nofs = align_to_mmap(self.ofs, 0)
- self.size += self.ofs - nofs # keep size constant
- self.ofs = nofs
- self.size = align_to_mmap(self.size, 1)
-
- def extend_left_to(self, window, max_size):
- """Adjust the offset to start where the given window on our left ends if possible,
- but don't make yourself larger than max_size.
- The resize will assure that the new window still contains the old window area"""
- rofs = self.ofs - window.ofs_end()
- nsize = rofs + self.size
- rofs -= nsize - min(nsize, max_size)
- self.ofs = self.ofs - rofs
- self.size += rofs
-
- def extend_right_to(self, window, max_size):
- """Adjust the size to make our window end where the right window begins, but don't
- get larger than max_size"""
- self.size = min(self.size + (window.ofs - self.ofs_end()), max_size)
-
-
-class MapRegion(object):
-    """Defines a mapped region of memory, aligned to page sizes
-
- **Note:** deallocates used region automatically on destruction"""
- __slots__ = [
- '_b' , # beginning of mapping
- '_mf', # mapped memory chunk (as returned by mmap)
- '_uc', # total amount of usages
- '_size', # cached size of our memory map
- '__weakref__'
- ]
- _need_compat_layer = sys.version_info[1] < 6
-
- if _need_compat_layer:
- __slots__.append('_mfb') # mapped memory buffer to provide offset
- #END handle additional slot
-
- #{ Configuration
- # Used for testing only. If True, all data will be loaded into memory at once.
- # This makes sure no file handles will remain open.
- _test_read_into_memory = False
- #} END configuration
-
-
- def __init__(self, path_or_fd, ofs, size, flags = 0):
- """Initialize a region, allocate the memory map
- :param path_or_fd: path to the file to map, or the opened file descriptor
- :param ofs: **aligned** offset into the file to be mapped
-        :param size: if size is larger than the file on disk, the whole file will be
-            allocated and the size automatically adjusted
- :param flags: additional flags to be given when opening the file.
- :raise Exception: if no memory can be allocated"""
- self._b = ofs
- self._size = 0
- self._uc = 0
-
- if isinstance(path_or_fd, int):
- fd = path_or_fd
- else:
- fd = os.open(path_or_fd, os.O_RDONLY|getattr(os, 'O_BINARY', 0)|flags)
- #END handle fd
-
- try:
- kwargs = dict(access=ACCESS_READ, offset=ofs)
- corrected_size = size
- sizeofs = ofs
- if self._need_compat_layer:
- del(kwargs['offset'])
- corrected_size += ofs
- sizeofs = 0
- # END handle python not supporting offset ! Arg
-
-            # have to correct the size, otherwise (unlike the c version) it will
-            # bark that the size is too large ... many extra file accesses because
-            # of this ... argh!
- actual_size = min(os.fstat(fd).st_size - sizeofs, corrected_size)
- if self._test_read_into_memory:
- self._mf = self._read_into_memory(fd, ofs, actual_size)
- else:
- self._mf = mmap(fd, actual_size, **kwargs)
- #END handle memory mode
-
- self._size = len(self._mf)
-
- if self._need_compat_layer:
- self._mfb = buffer(self._mf, ofs, self._size)
- #END handle buffer wrapping
- finally:
- if isinstance(path_or_fd, basestring):
- os.close(fd)
- #END only close it if we opened it
- #END close file handle
-
- def _read_into_memory(self, fd, offset, size):
- """:return: string data as read from the given file descriptor, offset and size """
- os.lseek(fd, offset, os.SEEK_SET)
- mf = ''
- bytes_todo = size
-        while bytes_todo > 0:
-            chunk = min(1024 * 1024, bytes_todo)  # never read more than requested
-            d = os.read(fd, chunk)
-            if not d:
-                break  # unexpected end of file - avoid spinning forever
-            bytes_todo -= len(d)
- mf += d
- #END loop copy items
- return mf
-
- def __repr__(self):
- return "MapRegion<%i, %i>" % (self._b, self.size())
-
- #{ Interface
-
- def buffer(self):
- """:return: a buffer containing the memory"""
- return self._mf
-
- def map(self):
- """:return: a memory map containing the memory"""
- return self._mf
-
- def ofs_begin(self):
- """:return: absolute byte offset to the first byte of the mapping"""
- return self._b
-
- def size(self):
- """:return: total size of the mapped region in bytes"""
- return self._size
-
- def ofs_end(self):
- """:return: Absolute offset to one byte beyond the mapping into the file"""
- return self._b + self._size
-
- def includes_ofs(self, ofs):
- """:return: True if the given offset can be read in our mapped region"""
- return self._b <= ofs < self._b + self._size
-
- def client_count(self):
- """:return: number of clients currently using this region"""
- # -1: self on stack, -1 self in this method, -1 self in getrefcount
- return getrefcount(self)-3
-
- def usage_count(self):
- """:return: amount of usages so far"""
- return self._uc
-
- def increment_usage_count(self):
-        """Increment the usage count by one"""
- self._uc += 1
-
- # re-define all methods which need offset adjustments in compatibility mode
- if _need_compat_layer:
- def size(self):
- return self._size - self._b
-
- def ofs_end(self):
- # always the size - we are as large as it gets
- return self._size
-
- def buffer(self):
- return self._mfb
-
- def includes_ofs(self, ofs):
- return self._b <= ofs < self._size
- #END handle compat layer
-
- #} END interface
-
-
-class MapRegionList(list):
- """List of MapRegion instances associating a path with a list of regions."""
- __slots__ = (
- '_path_or_fd', # path or file descriptor which is mapped by all our regions
- '_file_size' # total size of the file we map
- )
-
- def __new__(cls, path):
- return super(MapRegionList, cls).__new__(cls)
-
- def __init__(self, path_or_fd):
- self._path_or_fd = path_or_fd
- self._file_size = None
-
- def client_count(self):
- """:return: amount of clients which hold a reference to this instance"""
- return getrefcount(self)-3
-
- def path_or_fd(self):
- """:return: path or file descriptor we are attached to"""
- return self._path_or_fd
-
- def file_size(self):
-        """:return: size of the file we manage"""
- if self._file_size is None:
- if isinstance(self._path_or_fd, basestring):
- self._file_size = os.stat(self._path_or_fd).st_size
- else:
- self._file_size = os.fstat(self._path_or_fd).st_size
- #END handle path type
- #END update file size
- return self._file_size
-
-#} END utility classes
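With the usual allocation granularity of 4096 bytes, ``align_to_mmap`` snaps offsets in both directions (the results below assume that granularity)::

    from smmap.util import align_to_mmap

    align_to_mmap(1, False)     # -> 0,    floored to the previous boundary
    align_to_mmap(1, True)      # -> 4096, ceiled to the next boundary
    align_to_mmap(8192, True)   # -> 8192, already aligned, unchanged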
diff --git a/python-packages/tastypie/__init__.py b/python-packages/tastypie/__init__.py
deleted file mode 100644
index bb8dd9f9c5..0000000000
--- a/python-packages/tastypie/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from __future__ import unicode_literals
-
-
-__author__ = 'Daniel Lindsley & the Tastypie core team'
-__version__ = (0, 11, 0)
diff --git a/python-packages/tastypie/admin.py b/python-packages/tastypie/admin.py
deleted file mode 100644
index 87677051d2..0000000000
--- a/python-packages/tastypie/admin.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from __future__ import unicode_literals
-from django.conf import settings
-from django.contrib import admin
-
-
-if 'django.contrib.auth' in settings.INSTALLED_APPS:
- from tastypie.models import ApiKey
-
- class ApiKeyInline(admin.StackedInline):
- model = ApiKey
- extra = 0
-
- ABSTRACT_APIKEY = getattr(settings, 'TASTYPIE_ABSTRACT_APIKEY', False)
-
- if ABSTRACT_APIKEY and not isinstance(ABSTRACT_APIKEY, bool):
- raise TypeError("'TASTYPIE_ABSTRACT_APIKEY' must be either 'True' "
- "or 'False'.")
-
- if not ABSTRACT_APIKEY:
- admin.site.register(ApiKey)
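The hook above only registers ``ApiKey`` when it is a concrete model, which is controlled by a boolean setting; a sketch of the relevant settings entry (the value shown is the default)::

    # settings.py
    TASTYPIE_ABSTRACT_APIKEY = False   # keep ApiKey concrete so it shows up in the admin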
diff --git a/python-packages/tastypie/api.py b/python-packages/tastypie/api.py
deleted file mode 100644
index d8788afb94..0000000000
--- a/python-packages/tastypie/api.py
+++ /dev/null
@@ -1,184 +0,0 @@
-from __future__ import unicode_literals
-import warnings
-from django.conf.urls import url, patterns, include
-from django.core.exceptions import ImproperlyConfigured
-from django.core.urlresolvers import reverse
-from django.http import HttpResponse, HttpResponseBadRequest
-from tastypie.exceptions import NotRegistered, BadRequest
-from tastypie.serializers import Serializer
-from tastypie.utils import trailing_slash, is_valid_jsonp_callback_value
-from tastypie.utils.mime import determine_format, build_content_type
-
-
-class Api(object):
- """
- Implements a registry to tie together the various resources that make up
- an API.
-
- Especially useful for navigation, HATEOAS and for providing multiple
- versions of your API.
-
- Optionally supplying ``api_name`` allows you to name the API. Generally,
-    this is done with version numbers (e.g. ``v1``, ``v2``, etc.) but can
-    be any string.
- """
- def __init__(self, api_name="v1", serializer_class=Serializer):
- self.api_name = api_name
- self._registry = {}
- self._canonicals = {}
- self.serializer = serializer_class()
-
- def register(self, resource, canonical=True):
- """
- Registers an instance of a ``Resource`` subclass with the API.
-
- Optionally accept a ``canonical`` argument, which indicates that the
- resource being registered is the canonical variant. Defaults to
- ``True``.
- """
- resource_name = getattr(resource._meta, 'resource_name', None)
-
- if resource_name is None:
- raise ImproperlyConfigured("Resource %r must define a 'resource_name'." % resource)
-
- self._registry[resource_name] = resource
-
- if canonical is True:
- if resource_name in self._canonicals:
- warnings.warn("A new resource '%r' is replacing the existing canonical URL for '%s'." % (resource, resource_name), Warning, stacklevel=2)
-
- self._canonicals[resource_name] = resource
- # TODO: This is messy, but makes URI resolution on FK/M2M fields
- # work consistently.
- resource._meta.api_name = self.api_name
- resource.__class__.Meta.api_name = self.api_name
-
- def unregister(self, resource_name):
- """
- If present, unregisters a resource from the API.
- """
- if resource_name in self._registry:
- del(self._registry[resource_name])
-
- if resource_name in self._canonicals:
- del(self._canonicals[resource_name])
-
- def canonical_resource_for(self, resource_name):
- """
- Returns the canonical resource for a given ``resource_name``.
- """
- if resource_name in self._canonicals:
- return self._canonicals[resource_name]
-
- raise NotRegistered("No resource was registered as canonical for '%s'." % resource_name)
-
- def wrap_view(self, view):
- def wrapper(request, *args, **kwargs):
- try:
- return getattr(self, view)(request, *args, **kwargs)
- except BadRequest:
- return HttpResponseBadRequest()
- return wrapper
-
- def override_urls(self):
- """
- Deprecated. Will be removed by v1.0.0. Please use ``prepend_urls`` instead.
- """
- return []
-
- def prepend_urls(self):
- """
- A hook for adding your own URLs or matching before the default URLs.
- """
- return []
-
- @property
- def urls(self):
- """
- Provides URLconf details for the ``Api`` and all registered
- ``Resources`` beneath it.
- """
- pattern_list = [
-            url(r"^(?P<api_name>%s)%s$" % (self.api_name, trailing_slash()), self.wrap_view('top_level'), name="api_%s_top_level" % self.api_name),
- ]
-
- for name in sorted(self._registry.keys()):
- self._registry[name].api_name = self.api_name
-            pattern_list.append((r"^(?P<api_name>%s)/" % self.api_name, include(self._registry[name].urls)))
-
- urlpatterns = self.prepend_urls()
-
- overridden_urls = self.override_urls()
- if overridden_urls:
- warnings.warn("'override_urls' is a deprecated method & will be removed by v1.0.0. Please rename your method to ``prepend_urls``.")
- urlpatterns += overridden_urls
-
- urlpatterns += patterns('',
- *pattern_list
- )
- return urlpatterns
-
- def top_level(self, request, api_name=None):
- """
-        A view that returns a serialized list of all resources registered
- to the ``Api``. Useful for discovery.
- """
- available_resources = {}
-
- if api_name is None:
- api_name = self.api_name
-
- for name in sorted(self._registry.keys()):
- available_resources[name] = {
- 'list_endpoint': self._build_reverse_url("api_dispatch_list", kwargs={
- 'api_name': api_name,
- 'resource_name': name,
- }),
- 'schema': self._build_reverse_url("api_get_schema", kwargs={
- 'api_name': api_name,
- 'resource_name': name,
- }),
- }
-
- desired_format = determine_format(request, self.serializer)
-
- options = {}
-
- if 'text/javascript' in desired_format:
- callback = request.GET.get('callback', 'callback')
-
- if not is_valid_jsonp_callback_value(callback):
- raise BadRequest('JSONP callback name is invalid.')
-
- options['callback'] = callback
-
- serialized = self.serializer.serialize(available_resources, desired_format, options)
- return HttpResponse(content=serialized, content_type=build_content_type(desired_format))
-
- def _build_reverse_url(self, name, args=None, kwargs=None):
- """
- A convenience hook for overriding how URLs are built.
-
- See ``NamespacedApi._build_reverse_url`` for an example.
- """
- return reverse(name, args=args, kwargs=kwargs)
-
-
-class NamespacedApi(Api):
- """
- An API subclass that respects Django namespaces.
- """
- def __init__(self, api_name="v1", urlconf_namespace=None, **kwargs):
- super(NamespacedApi, self).__init__(api_name=api_name, **kwargs)
- self.urlconf_namespace = urlconf_namespace
-
- def register(self, resource, canonical=True):
- super(NamespacedApi, self).register(resource, canonical=canonical)
-
- if canonical is True:
- # Plop in the namespace here as well.
- resource._meta.urlconf_namespace = self.urlconf_namespace
-
- def _build_reverse_url(self, name, args=None, kwargs=None):
- namespaced = "%s:%s" % (self.urlconf_namespace, name)
- return reverse(namespaced, args=args, kwargs=kwargs)
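An ``Api`` instance is typically wired into a project's URLconf through its ``urls`` property. A hedged sketch against this tastypie version, where ``UserResource`` stands in for any ``Resource`` subclass::

    # urls.py (illustrative)
    from django.conf.urls import url, include, patterns
    from tastypie.api import Api
    from myapp.api import UserResource   # hypothetical resource

    v1_api = Api(api_name='v1')
    v1_api.register(UserResource())      # canonical registration by default

    urlpatterns = patterns('',
        url(r'^api/', include(v1_api.urls)),   # serves /api/v1/ and /api/v1/user/
    )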
diff --git a/python-packages/tastypie/authentication.py b/python-packages/tastypie/authentication.py
deleted file mode 100644
index 7f3281d6a1..0000000000
--- a/python-packages/tastypie/authentication.py
+++ /dev/null
@@ -1,517 +0,0 @@
-from __future__ import unicode_literals
-import base64
-import hmac
-import time
-import uuid
-
-from django.conf import settings
-from django.contrib.auth import authenticate
-from django.core.exceptions import ImproperlyConfigured
-from django.middleware.csrf import _sanitize_token, constant_time_compare
-from django.utils.http import same_origin
-from django.utils.translation import ugettext as _
-from tastypie.http import HttpUnauthorized
-from tastypie.compat import User, username_field
-
-try:
- from hashlib import sha1
-except ImportError:
- import sha
- sha1 = sha.sha
-
-try:
- import python_digest
-except ImportError:
- python_digest = None
-
-try:
- import oauth2
-except ImportError:
- oauth2 = None
-
-try:
- import oauth_provider
-except ImportError:
- oauth_provider = None
-
-
-class Authentication(object):
- """
- A simple base class to establish the protocol for auth.
-
- By default, this indicates the user is always authenticated.
- """
- def __init__(self, require_active=True):
- self.require_active = require_active
-
- def is_authenticated(self, request, **kwargs):
- """
- Identifies if the user is authenticated to continue or not.
-
- Should return either ``True`` if allowed, ``False`` if not or an
- ``HttpResponse`` if you need something custom.
- """
- return True
-
- def get_identifier(self, request):
- """
- Provides a unique string identifier for the requestor.
-
- This implementation returns a combination of IP address and hostname.
- """
- return "%s_%s" % (request.META.get('REMOTE_ADDR', 'noaddr'), request.META.get('REMOTE_HOST', 'nohost'))
-
- def check_active(self, user):
- """
- Ensures the user has an active account.
-
- Optimized for the ``django.contrib.auth.models.User`` case.
- """
- if not self.require_active:
- # Ignore & move on.
- return True
-
- return user.is_active
-
-
-class BasicAuthentication(Authentication):
- """
- Handles HTTP Basic auth against a specific auth backend if provided,
- or against all configured authentication backends using the
- ``authenticate`` method from ``django.contrib.auth``.
-
- Optional keyword arguments:
-
- ``backend``
- If specified, use a specific ``django.contrib.auth`` backend instead
- of checking all backends specified in the ``AUTHENTICATION_BACKENDS``
- setting.
- ``realm``
- The realm to use in the ``HttpUnauthorized`` response. Default:
- ``django-tastypie``.
- """
- def __init__(self, backend=None, realm='django-tastypie', **kwargs):
- super(BasicAuthentication, self).__init__(**kwargs)
- self.backend = backend
- self.realm = realm
-
- def _unauthorized(self):
- response = HttpUnauthorized()
- # FIXME: Sanitize realm.
- response['WWW-Authenticate'] = 'Basic Realm="%s"' % self.realm
- return response
-
- def is_authenticated(self, request, **kwargs):
- """
- Checks a user's basic auth credentials against the current
- Django auth backend.
-
- Should return either ``True`` if allowed, ``False`` if not or an
- ``HttpResponse`` if you need something custom.
- """
- if not request.META.get('HTTP_AUTHORIZATION'):
- return self._unauthorized()
-
- try:
- (auth_type, data) = request.META['HTTP_AUTHORIZATION'].split()
- if auth_type.lower() != 'basic':
- return self._unauthorized()
- user_pass = base64.b64decode(data).decode('utf-8')
- except:
- return self._unauthorized()
-
- bits = user_pass.split(':', 1)
-
- if len(bits) != 2:
- return self._unauthorized()
-
- if self.backend:
- user = self.backend.authenticate(username=bits[0], password=bits[1])
- else:
- user = authenticate(username=bits[0], password=bits[1])
-
- if user is None:
- return self._unauthorized()
-
- if not self.check_active(user):
- return False
-
- request.user = user
- return True
-
- def get_identifier(self, request):
- """
- Provides a unique string identifier for the requestor.
-
- This implementation returns the user's basic auth username.
- """
- return request.META.get('REMOTE_USER', 'nouser')
-
-
-class ApiKeyAuthentication(Authentication):
- """
- Handles API key auth, in which a user provides a username & API key.
-
- Uses the ``ApiKey`` model that ships with tastypie. If you wish to use
- a different model, override the ``get_key`` method to perform the key check
- as suits your needs.
- """
- def _unauthorized(self):
- return HttpUnauthorized()
-
- def extract_credentials(self, request):
- if request.META.get('HTTP_AUTHORIZATION') and request.META['HTTP_AUTHORIZATION'].lower().startswith('apikey '):
- (auth_type, data) = request.META['HTTP_AUTHORIZATION'].split()
-
- if auth_type.lower() != 'apikey':
- raise ValueError("Incorrect authorization header.")
-
- username, api_key = data.split(':', 1)
- else:
- username = request.GET.get('username') or request.POST.get('username')
- api_key = request.GET.get('api_key') or request.POST.get('api_key')
-
- return username, api_key
-
- def is_authenticated(self, request, **kwargs):
- """
- Finds the user and checks their API key.
-
- Should return either ``True`` if allowed, ``False`` if not or an
- ``HttpResponse`` if you need something custom.
- """
- from tastypie.compat import User
-
- try:
- username, api_key = self.extract_credentials(request)
- except ValueError:
- return self._unauthorized()
-
- if not username or not api_key:
- return self._unauthorized()
-
- try:
- lookup_kwargs = {username_field: username}
- user = User.objects.get(**lookup_kwargs)
- except (User.DoesNotExist, User.MultipleObjectsReturned):
- return self._unauthorized()
-
- if not self.check_active(user):
- return False
-
- key_auth_check = self.get_key(user, api_key)
- if key_auth_check and not isinstance(key_auth_check, HttpUnauthorized):
- request.user = user
-
- return key_auth_check
-
- def get_key(self, user, api_key):
- """
- Attempts to find the API key for the user. Uses ``ApiKey`` by default
- but can be overridden.
- """
- from tastypie.models import ApiKey
-
- try:
- ApiKey.objects.get(user=user, key=api_key)
- except ApiKey.DoesNotExist:
- return self._unauthorized()
-
- return True
-
- def get_identifier(self, request):
- """
- Provides a unique string identifier for the requestor.
-
- This implementation returns the user's username.
- """
- username, api_key = self.extract_credentials(request)
- return username or 'nouser'
-
-
-class SessionAuthentication(Authentication):
- """
- An authentication mechanism that piggy-backs on Django sessions.
-
- This is useful when the API is talking to Javascript on the same site.
- Relies on the user being logged in through the standard Django login
- setup.
-
- Requires a valid CSRF token.
- """
- def is_authenticated(self, request, **kwargs):
- """
- Checks to make sure the user is logged in & has a Django session.
- """
- # Cargo-culted from Django 1.3/1.4's ``django/middleware/csrf.py``.
- # We can't just use what's there, since the return values will be
- # wrong.
- # We also can't risk accessing ``request.POST``, which will break with
- # the serialized bodies.
- if request.method in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
- return request.user.is_authenticated()
-
- if getattr(request, '_dont_enforce_csrf_checks', False):
- return request.user.is_authenticated()
-
- csrf_token = _sanitize_token(request.COOKIES.get(settings.CSRF_COOKIE_NAME, ''))
-
- if request.is_secure():
- referer = request.META.get('HTTP_REFERER')
-
- if referer is None:
- return False
-
- good_referer = 'https://%s/' % request.get_host()
-
- if not same_origin(referer, good_referer):
- return False
-
- request_csrf_token = request.META.get('HTTP_X_CSRFTOKEN', '')
-
- if not constant_time_compare(request_csrf_token, csrf_token):
- return False
-
- return request.user.is_authenticated()
-
- def get_identifier(self, request):
- """
- Provides a unique string identifier for the requestor.
-
- This implementation returns the user's username.
- """
- return getattr(request.user, username_field)
-
-
-class DigestAuthentication(Authentication):
- """
- Handles HTTP Digest auth against a specific auth backend if provided,
- or against all configured authentication backends using the
- ``authenticate`` method from ``django.contrib.auth``. However, instead of
- the user's password, their API key should be used.
-
- Optional keyword arguments:
-
- ``backend``
- If specified, use a specific ``django.contrib.auth`` backend instead
- of checking all backends specified in the ``AUTHENTICATION_BACKENDS``
- setting.
- ``realm``
- The realm to use in the ``HttpUnauthorized`` response. Default:
- ``django-tastypie``.
- """
- def __init__(self, backend=None, realm='django-tastypie', **kwargs):
- super(DigestAuthentication, self).__init__(**kwargs)
- self.backend = backend
- self.realm = realm
-
- if python_digest is None:
- raise ImproperlyConfigured("The 'python_digest' package could not be imported. It is required for use with the 'DigestAuthentication' class.")
-
- def _unauthorized(self):
- response = HttpUnauthorized()
- new_uuid = uuid.uuid4()
- opaque = hmac.new(str(new_uuid).encode('utf-8'), digestmod=sha1).hexdigest()
- response['WWW-Authenticate'] = python_digest.build_digest_challenge(
- timestamp=time.time(),
- secret=getattr(settings, 'SECRET_KEY', ''),
- realm=self.realm,
- opaque=opaque,
- stale=False
- )
- return response
-
- def is_authenticated(self, request, **kwargs):
- """
- Finds the user and checks their API key.
-
- Should return either ``True`` if allowed, ``False`` if not or an
- ``HttpResponse`` if you need something custom.
- """
- if not request.META.get('HTTP_AUTHORIZATION'):
- return self._unauthorized()
-
- try:
- (auth_type, data) = request.META['HTTP_AUTHORIZATION'].split(' ', 1)
-
- if auth_type.lower() != 'digest':
- return self._unauthorized()
- except:
- return self._unauthorized()
-
- digest_response = python_digest.parse_digest_credentials(request.META['HTTP_AUTHORIZATION'])
-
- # FIXME: Should the nonce be per-user?
- if not python_digest.validate_nonce(digest_response.nonce, getattr(settings, 'SECRET_KEY', '')):
- return self._unauthorized()
-
- user = self.get_user(digest_response.username)
- api_key = self.get_key(user)
-
- if user is False or api_key is False:
- return self._unauthorized()
-
- expected = python_digest.calculate_request_digest(
- request.method,
- python_digest.calculate_partial_digest(digest_response.username, self.realm, api_key),
- digest_response)
-
- if not digest_response.response == expected:
- return self._unauthorized()
-
- if not self.check_active(user):
- return False
-
- request.user = user
- return True
-
- def get_user(self, username):
- try:
- lookup_kwargs = {username_field: username}
- user = User.objects.get(**lookup_kwargs)
- except (User.DoesNotExist, User.MultipleObjectsReturned):
- return False
-
- return user
-
- def get_key(self, user):
- """
- Attempts to find the API key for the user. Uses ``ApiKey`` by default
- but can be overridden.
-
- Note that this behaves differently than the ``ApiKeyAuthentication``
- method of the same name.
- """
- from tastypie.models import ApiKey
-
- try:
- key = ApiKey.objects.get(user=user)
- except ApiKey.DoesNotExist:
- return False
-
- return key.key
-
- def get_identifier(self, request):
- """
- Provides a unique string identifier for the requestor.
-
- This implementation returns the user's username.
- """
- if hasattr(request, 'user'):
- if hasattr(request.user, 'username'):
- return request.user.username
-
- return 'nouser'
-
-
-class OAuthAuthentication(Authentication):
- """
- Handles OAuth, which checks a user's credentials against a separate service.
- Currently verifies against OAuth 1.0a services.
-
- This does *NOT* provide OAuth authentication in your API, strictly
- consumption.
- """
- def __init__(self, **kwargs):
- super(OAuthAuthentication, self).__init__(**kwargs)
-
- if oauth2 is None:
- raise ImproperlyConfigured("The 'python-oauth2' package could not be imported. It is required for use with the 'OAuthAuthentication' class.")
-
- if oauth_provider is None:
- raise ImproperlyConfigured("The 'django-oauth-plus' package could not be imported. It is required for use with the 'OAuthAuthentication' class.")
-
- def is_authenticated(self, request, **kwargs):
- from oauth_provider.store import store, InvalidTokenError
-
- if self.is_valid_request(request):
- oauth_request = oauth_provider.utils.get_oauth_request(request)
- consumer = store.get_consumer(request, oauth_request, oauth_request.get_parameter('oauth_consumer_key'))
-
- try:
- token = store.get_access_token(request, oauth_request, consumer, oauth_request.get_parameter('oauth_token'))
- except oauth_provider.store.InvalidTokenError:
- return oauth_provider.utils.send_oauth_error(oauth2.Error(_('Invalid access token: %s') % oauth_request.get_parameter('oauth_token')))
-
- try:
- self.validate_token(request, consumer, token)
- except oauth2.Error as e:
- return oauth_provider.utils.send_oauth_error(e)
-
- if consumer and token:
- if not self.check_active(token.user):
- return False
-
- request.user = token.user
- return True
-
- return oauth_provider.utils.send_oauth_error(oauth2.Error(_('You are not allowed to access this resource.')))
-
- return oauth_provider.utils.send_oauth_error(oauth2.Error(_('Invalid request parameters.')))
-
- def is_in(self, params):
- """
- Checks to ensure that all the OAuth parameter names are in the
- provided ``params``.
- """
- from oauth_provider.consts import OAUTH_PARAMETERS_NAMES
-
- for param_name in OAUTH_PARAMETERS_NAMES:
- if param_name not in params:
- return False
-
- return True
-
- def is_valid_request(self, request):
- """
- Checks whether the required parameters are either in the HTTP
- ``Authorization`` header sent by some clients (the preferred method
- according to OAuth spec) or fall back to ``GET/POST``.
- """
- auth_params = request.META.get("HTTP_AUTHORIZATION", [])
- return self.is_in(auth_params) or self.is_in(request.REQUEST)
-
- def validate_token(self, request, consumer, token):
- oauth_server, oauth_request = oauth_provider.utils.initialize_server_request(request)
- return oauth_server.verify_request(oauth_request, consumer, token)
-
-
-class MultiAuthentication(object):
- """
- An authentication backend that tries a number of backends in order.
- """
- def __init__(self, *backends, **kwargs):
- super(MultiAuthentication, self).__init__(**kwargs)
- self.backends = backends
-
- def is_authenticated(self, request, **kwargs):
- """
- Identifies if the user is authenticated to continue or not.
-
- Should return either ``True`` if allowed, ``False`` if not or an
- ``HttpResponse`` if you need something custom.
- """
- unauthorized = False
-
- for backend in self.backends:
- check = backend.is_authenticated(request, **kwargs)
-
- if check:
- if isinstance(check, HttpUnauthorized):
- unauthorized = unauthorized or check
- else:
- request._authentication_backend = backend
- return check
-
- return unauthorized
-
- def get_identifier(self, request):
- """
- Provides a unique string identifier for the requestor.
-
-        This implementation delegates to the backend which authenticated the request.
- """
- try:
- return request._authentication_backend.get_identifier(request)
- except AttributeError:
- return 'nouser'
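A resource normally selects one of these classes, or combines several via ``MultiAuthentication``, in its ``Meta``. A hedged sketch with a hypothetical ``NoteResource``::

    from tastypie.authentication import (MultiAuthentication,
        SessionAuthentication, ApiKeyAuthentication)
    from tastypie.resources import ModelResource   # assumed import for this sketch

    class NoteResource(ModelResource):
        class Meta:
            # session auth for same-site javascript, API keys for other clients;
            # backends are tried in order and the first success wins
            authentication = MultiAuthentication(SessionAuthentication(),
                                                 ApiKeyAuthentication())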
diff --git a/python-packages/tastypie/authorization.py b/python-packages/tastypie/authorization.py
deleted file mode 100644
index 7a4c647ec0..0000000000
--- a/python-packages/tastypie/authorization.py
+++ /dev/null
@@ -1,245 +0,0 @@
-from __future__ import unicode_literals
-from tastypie.exceptions import TastypieError, Unauthorized
-
-
-class Authorization(object):
- """
- A base class that provides no permissions checking.
- """
- def __get__(self, instance, owner):
- """
- Makes ``Authorization`` a descriptor of ``ResourceOptions`` and creates
- a reference to the ``ResourceOptions`` object that may be used by
- methods of ``Authorization``.
- """
- self.resource_meta = instance
- return self
-
- def apply_limits(self, request, object_list):
- """
- Deprecated.
-
- FIXME: REMOVE BEFORE 1.0
- """
- raise TastypieError("Authorization classes no longer support `apply_limits`. Please update to using `read_list`.")
-
- def read_list(self, object_list, bundle):
- """
- Returns a list of all the objects a user is allowed to read.
-
- Should return an empty list if none are allowed.
-
- Returns the entire list by default.
- """
- return object_list
-
- def read_detail(self, object_list, bundle):
- """
- Returns either ``True`` if the user is allowed to read the object in
-        question or raises ``Unauthorized`` if they are not.
-
- Returns ``True`` by default.
- """
- return True
-
- def create_list(self, object_list, bundle):
- """
- Unimplemented, as Tastypie never creates entire new lists, but
- present for consistency & possible extension.
- """
- raise NotImplementedError("Tastypie has no way to determine if all objects should be allowed to be created.")
-
- def create_detail(self, object_list, bundle):
- """
- Returns either ``True`` if the user is allowed to create the object in
-        question or raises ``Unauthorized`` if they are not.
-
- Returns ``True`` by default.
- """
- return True
-
- def update_list(self, object_list, bundle):
- """
- Returns a list of all the objects a user is allowed to update.
-
- Should return an empty list if none are allowed.
-
- Returns the entire list by default.
- """
- return object_list
-
- def update_detail(self, object_list, bundle):
- """
- Returns either ``True`` if the user is allowed to update the object in
- question or raises ``Unauthorized`` if they are not.
-
- Returns ``True`` by default.
- """
- return True
-
- def delete_list(self, object_list, bundle):
- """
- Returns a list of all the objects a user is allowed to delete.
-
- Should return an empty list if none are allowed.
-
- Returns the entire list by default.
- """
- return object_list
-
- def delete_detail(self, object_list, bundle):
- """
- Returns either ``True`` if the user is allowed to delete the object in
- question or raises ``Unauthorized`` if they are not.
-
- Returns ``True`` by default.
- """
- return True
-
-
-class ReadOnlyAuthorization(Authorization):
- """
- Default ``Authorization`` class for ``Resource`` objects.
-
- Only allows ``GET`` requests.
- """
- def read_list(self, object_list, bundle):
- return object_list
-
- def read_detail(self, object_list, bundle):
- return True
-
- def create_list(self, object_list, bundle):
- return []
-
- def create_detail(self, object_list, bundle):
- raise Unauthorized("You are not allowed to access that resource.")
-
- def update_list(self, object_list, bundle):
- return []
-
- def update_detail(self, object_list, bundle):
- raise Unauthorized("You are not allowed to access that resource.")
-
- def delete_list(self, object_list, bundle):
- return []
-
- def delete_detail(self, object_list, bundle):
- raise Unauthorized("You are not allowed to access that resource.")
-
-
-class DjangoAuthorization(Authorization):
- """
- Uses permission checking from ``django.contrib.auth`` to map
- ``POST / PUT / DELETE / PATCH`` to their equivalent Django auth
- permissions.
-
- Both the list & detail variants simply check the model they're based
- on, as that's all the more granular Django's permission setup gets.
- """
- def base_checks(self, request, model_klass):
- # If it doesn't look like a model, we can't check permissions.
- if not model_klass or not getattr(model_klass, '_meta', None):
- return False
-
- # User must be logged in to check permissions.
- if not hasattr(request, 'user'):
- return False
-
- return model_klass
-
- def read_list(self, object_list, bundle):
- klass = self.base_checks(bundle.request, object_list.model)
-
- if klass is False:
- return []
-
- # GET-style methods are always allowed.
- return object_list
-
- def read_detail(self, object_list, bundle):
- klass = self.base_checks(bundle.request, bundle.obj.__class__)
-
- if klass is False:
- raise Unauthorized("You are not allowed to access that resource.")
-
- # GET-style methods are always allowed.
- return True
-
- def create_list(self, object_list, bundle):
- klass = self.base_checks(bundle.request, object_list.model)
-
- if klass is False:
- return []
-
- permission = '%s.add_%s' % (klass._meta.app_label, klass._meta.module_name)
-
- if not bundle.request.user.has_perm(permission):
- return []
-
- return object_list
-
- def create_detail(self, object_list, bundle):
- klass = self.base_checks(bundle.request, bundle.obj.__class__)
-
- if klass is False:
- raise Unauthorized("You are not allowed to access that resource.")
-
- permission = '%s.add_%s' % (klass._meta.app_label, klass._meta.module_name)
-
- if not bundle.request.user.has_perm(permission):
- raise Unauthorized("You are not allowed to access that resource.")
-
- return True
-
- def update_list(self, object_list, bundle):
- klass = self.base_checks(bundle.request, object_list.model)
-
- if klass is False:
- return []
-
- permission = '%s.change_%s' % (klass._meta.app_label, klass._meta.module_name)
-
- if not bundle.request.user.has_perm(permission):
- return []
-
- return object_list
-
- def update_detail(self, object_list, bundle):
- klass = self.base_checks(bundle.request, bundle.obj.__class__)
-
- if klass is False:
- raise Unauthorized("You are not allowed to access that resource.")
-
- permission = '%s.change_%s' % (klass._meta.app_label, klass._meta.module_name)
-
- if not bundle.request.user.has_perm(permission):
- raise Unauthorized("You are not allowed to access that resource.")
-
- return True
-
- def delete_list(self, object_list, bundle):
- klass = self.base_checks(bundle.request, object_list.model)
-
- if klass is False:
- return []
-
- permission = '%s.delete_%s' % (klass._meta.app_label, klass._meta.module_name)
-
- if not bundle.request.user.has_perm(permission):
- return []
-
- return object_list
-
- def delete_detail(self, object_list, bundle):
- klass = self.base_checks(bundle.request, bundle.obj.__class__)
-
- if klass is False:
- raise Unauthorized("You are not allowed to access that resource.")
-
- permission = '%s.delete_%s' % (klass._meta.app_label, klass._meta.module_name)
-
- if not bundle.request.user.has_perm(permission):
- raise Unauthorized("You are not allowed to access that resource.")
-
- return True
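
The ``Authorization`` API pairs a list and a detail hook per HTTP verb. A
common pattern was an owner-only subclass; a minimal sketch, assuming each
object has a ``user`` foreign key (the field name is hypothetical)::

    from tastypie.authorization import Authorization
    from tastypie.exceptions import Unauthorized

    class UserObjectsOnlyAuthorization(Authorization):
        def read_list(self, object_list, bundle):
            # Narrow the queryset down to objects owned by the requestor.
            return object_list.filter(user=bundle.request.user)

        def read_detail(self, object_list, bundle):
            return bundle.obj.user == bundle.request.user

        def delete_detail(self, object_list, bundle):
            if bundle.obj.user != bundle.request.user:
                raise Unauthorized("You may only delete your own objects.")
            return True
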
diff --git a/python-packages/tastypie/bundle.py b/python-packages/tastypie/bundle.py
deleted file mode 100644
index 6adc8fef16..0000000000
--- a/python-packages/tastypie/bundle.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from __future__ import unicode_literals
-from django.http import HttpRequest
-
-
-# In a separate file to avoid circular imports...
-class Bundle(object):
- """
- A small container for instances and converted data for the
- ``dehydrate/hydrate`` cycle.
-
- Necessary because the ``dehydrate/hydrate`` cycle needs to access data at
- different points.
- """
- def __init__(self,
- obj=None,
- data=None,
- request=None,
- related_obj=None,
- related_name=None,
- objects_saved=None,
- related_objects_to_save=None,
- ):
- self.obj = obj
- self.data = data or {}
- self.request = request or HttpRequest()
- self.related_obj = related_obj
- self.related_name = related_name
- self.errors = {}
- self.objects_saved = objects_saved or set()
- self.related_objects_to_save = related_objects_to_save or {}
-
- def __repr__(self):
- return "" % (self.obj, self.data)
diff --git a/python-packages/tastypie/cache.py b/python-packages/tastypie/cache.py
deleted file mode 100644
index 22bfd43979..0000000000
--- a/python-packages/tastypie/cache.py
+++ /dev/null
@@ -1,98 +0,0 @@
-from __future__ import unicode_literals
-from django.core.cache import get_cache
-
-
-class NoCache(object):
- """
- A simplified, swappable base class for caching.
-
- Does nothing save for simulating the cache API.
- """
- def __init__(self, varies=None, *args, **kwargs):
- """
- Optionally accepts a ``varies`` list that will be used in the
- Vary header. Defaults to ["Accept"].
- """
- super(NoCache, self).__init__(*args, **kwargs)
- self.varies = varies
-
- if self.varies is None:
- self.varies = ["Accept"]
-
- def get(self, key):
- """
- Always returns ``None``.
- """
- return None
-
- def set(self, key, value, timeout=60):
- """
- No-op for setting values in the cache.
- """
- pass
-
- def cacheable(self, request, response):
- """
- Returns ``True`` if the request/response pair is capable of being
- cached, ``False`` otherwise.
- """
- return bool(request.method == "GET" and response.status_code == 200)
-
- def cache_control(self):
- """
- Returns ``Cache-Control`` values that disable caching.
- """
- return {
- 'no_cache': True,
- }
-
-
-class SimpleCache(NoCache):
- """
- Uses Django's current ``CACHES`` configuration to store cached data.
- """
-
- def __init__(self, cache_name='default', timeout=None, public=None,
- private=None, *args, **kwargs):
- """
- Optionally accepts a ``timeout`` in seconds for the resource's cache.
- Defaults to ``60`` seconds.
- """
- super(SimpleCache, self).__init__(*args, **kwargs)
- self.cache = get_cache(cache_name)
- self.timeout = timeout or self.cache.default_timeout
- self.public = public
- self.private = private
-
- def get(self, key, **kwargs):
- """
- Gets a key from the cache. Returns ``None`` if the key is not found.
- """
- return self.cache.get(key, **kwargs)
-
- def set(self, key, value, timeout=None):
- """
- Sets a key-value in the cache.
-
- Optionally accepts a ``timeout`` in seconds. Defaults to ``None`` which
- uses the resource's default timeout.
- """
-
- if timeout is None:
- timeout = self.timeout
-
- self.cache.set(key, value, timeout)
-
- def cache_control(self):
- control = {
- 'max_age': self.timeout,
- 's_maxage': self.timeout,
- }
-
- if self.public is not None:
- control["public"] = self.public
-
- if self.private is not None:
- control["private"] = self.private
-
- return control
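
``SimpleCache`` was enabled per resource via ``Meta``; a minimal sketch,
assuming a ``resources`` alias exists in Django's ``CACHES`` setting and a
hypothetical ``Note`` model::

    from tastypie.cache import SimpleCache
    from tastypie.resources import ModelResource

    class NoteResource(ModelResource):
        class Meta:
            queryset = Note.objects.all()  # hypothetical model
            # Entries expire after 10 seconds; cache_control() will emit
            # public with max_age/s_maxage of 10.
            cache = SimpleCache(cache_name='resources', timeout=10,
                                public=True)
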
diff --git a/python-packages/tastypie/compat.py b/python-packages/tastypie/compat.py
deleted file mode 100644
index aa79610313..0000000000
--- a/python-packages/tastypie/compat.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from __future__ import unicode_literals
-from django.conf import settings
-from django.core.exceptions import ImproperlyConfigured
-import django
-
-__all__ = ['User', 'AUTH_USER_MODEL']
-
-AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
-
-# Django 1.5+ compatibility
-if django.VERSION >= (1, 5):
- try:
- from django.contrib.auth import get_user_model
- User = get_user_model()
- username_field = User.USERNAME_FIELD
- except ImproperlyConfigured:
- # The user model might not be ready yet. This can happen when
- # setting up the create_api_key signal in your custom user module.
- User = None
- username_field = None
-else:
- from django.contrib.auth.models import User
- username_field = 'username'
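
This compat shim let calling code stay agnostic about custom user models; a
minimal sketch of its intended use::

    from tastypie.compat import User, username_field
    from tastypie.resources import ModelResource

    class UserResource(ModelResource):
        class Meta:
            queryset = User.objects.all()
            # username_field is 'username' on Django < 1.5 and
            # User.USERNAME_FIELD on 1.5+.
            fields = [username_field, 'first_name', 'last_name']
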
diff --git a/python-packages/tastypie/constants.py b/python-packages/tastypie/constants.py
deleted file mode 100644
index 89f7fb79da..0000000000
--- a/python-packages/tastypie/constants.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from __future__ import unicode_literals
-
-# Enable all basic ORM filters but do not allow filtering across relationships.
-ALL = 1
-# Enable all ORM filters, including across relationships.
-ALL_WITH_RELATIONS = 2
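
These flags are consumed by a resource's ``Meta.filtering`` dict; a minimal
sketch (the model and field names are hypothetical)::

    from tastypie.constants import ALL, ALL_WITH_RELATIONS
    from tastypie.resources import ModelResource

    class NoteResource(ModelResource):
        class Meta:
            queryset = Note.objects.all()
            filtering = {
                'title': ALL,                # any basic ORM filter on title
                'user': ALL_WITH_RELATIONS,  # also allow user__username etc.
            }
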
diff --git a/python-packages/tastypie/contrib/__init__.py b/python-packages/tastypie/contrib/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/tastypie/contrib/contenttypes/__init__.py b/python-packages/tastypie/contrib/contenttypes/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/tastypie/contrib/contenttypes/fields.py b/python-packages/tastypie/contrib/contenttypes/fields.py
deleted file mode 100644
index c274190247..0000000000
--- a/python-packages/tastypie/contrib/contenttypes/fields.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from __future__ import unicode_literals
-from functools import partial
-from tastypie import fields
-from tastypie.resources import Resource
-from tastypie.exceptions import ApiFieldError
-from django.db import models
-from django.core.exceptions import ObjectDoesNotExist
-from .resources import GenericResource
-
-
-class GenericForeignKeyField(fields.ToOneField):
- """
- Provides access to GenericForeignKey objects from the django content_types
- framework.
- """
-
- def __init__(self, to, attribute, **kwargs):
- if not isinstance(to, dict):
- raise ValueError('to field must be a dictionary in GenericForeignKeyField')
-
- if len(to) <= 0:
- raise ValueError('to field must have some values')
-
- for k, v in to.items():
- if not issubclass(k, models.Model) or not issubclass(v, Resource):
- raise ValueError('to field must map django models to tastypie resources')
-
- super(GenericForeignKeyField, self).__init__(to, attribute, **kwargs)
-
- def get_related_resource(self, related_instance):
- self._to_class = self.to.get(type(related_instance), None)
-
- if self._to_class is None:
- raise TypeError('no resource for model %s' % type(related_instance))
-
- return super(GenericForeignKeyField, self).get_related_resource(related_instance)
-
- @property
- def to_class(self):
- if self._to_class and not issubclass(GenericResource, self._to_class):
- return self._to_class
-
- return partial(GenericResource, resources=self.to.values())
-
- def resource_from_uri(self, fk_resource, uri, request=None, related_obj=None, related_name=None):
- try:
- obj = fk_resource.get_via_uri(uri, request=request)
- fk_resource = self.get_related_resource(obj)
- return super(GenericForeignKeyField, self).resource_from_uri(fk_resource, uri, request, related_obj, related_name)
- except ObjectDoesNotExist:
- raise ApiFieldError("Could not find the provided object via resource URI '%s'." % uri)
-
- def build_related_resource(self, *args, **kwargs):
- self._to_class = None
- return super(GenericForeignKeyField, self).build_related_resource(*args, **kwargs)
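
``GenericForeignKeyField`` maps concrete models to their resources so a GFK
can be dehydrated to the right resource URI; a minimal sketch, with
``Note``/``Quote`` and their resources hypothetical::

    from tastypie.contrib.contenttypes.fields import GenericForeignKeyField
    from tastypie.resources import ModelResource

    class TaggedItemResource(ModelResource):
        content_object = GenericForeignKeyField({
            Note: NoteResource,    # model -> resource mapping
            Quote: QuoteResource,
        }, 'content_object')

        class Meta:
            queryset = TaggedItem.objects.all()  # hypothetical GFK model
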
diff --git a/python-packages/tastypie/contrib/contenttypes/resources.py b/python-packages/tastypie/contrib/contenttypes/resources.py
deleted file mode 100644
index aa70ca6da1..0000000000
--- a/python-packages/tastypie/contrib/contenttypes/resources.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from __future__ import unicode_literals
-from tastypie.bundle import Bundle
-from tastypie.resources import ModelResource
-from tastypie.exceptions import NotFound
-from django.core.urlresolvers import resolve, Resolver404, get_script_prefix
-
-
-class GenericResource(ModelResource):
- """
- Provides a stand-in resource for GFK relations.
- """
- def __init__(self, resources, *args, **kwargs):
- self.resource_mapping = dict((r._meta.resource_name, r) for r in resources)
- return super(GenericResource, self).__init__(*args, **kwargs)
-
- def get_via_uri(self, uri, request=None):
- """
- This pulls apart the salient bits of the URI and populates the
- resource via an ``obj_get`` call.
-
- Optionally accepts a ``request``.
-
- If you need custom behavior based on other portions of the URI,
- simply override this method.
- """
- prefix = get_script_prefix()
- chomped_uri = uri
-
- if prefix and chomped_uri.startswith(prefix):
- chomped_uri = chomped_uri[len(prefix)-1:]
-
- try:
- view, args, kwargs = resolve(chomped_uri)
- resource_name = kwargs['resource_name']
- resource_class = self.resource_mapping[resource_name]
- except (Resolver404, KeyError):
- raise NotFound("The URL provided '%s' was not a link to a valid resource." % uri)
-
- parent_resource = resource_class(api_name=self._meta.api_name)
- kwargs = parent_resource.remove_api_resource_names(kwargs)
- bundle = Bundle(request=request)
- return parent_resource.obj_get(bundle, **kwargs)
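
The URI handling above leans on Django's resolver; a minimal sketch of what
``resolve()`` yields for a typical tastypie URI (the URL pattern and values
are assumed)::

    from django.core.urlresolvers import resolve

    view, args, kwargs = resolve('/api/v1/note/1/')
    # kwargs would look something like:
    # {'api_name': 'v1', 'resource_name': 'note', 'pk': '1'}
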
diff --git a/python-packages/tastypie/contrib/gis/__init__.py b/python-packages/tastypie/contrib/gis/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python-packages/tastypie/contrib/gis/resources.py b/python-packages/tastypie/contrib/gis/resources.py
deleted file mode 100644
index 64ce1ec044..0000000000
--- a/python-packages/tastypie/contrib/gis/resources.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# See COPYING file in this directory.
-# Some code originally from django-boundaryservice
-from __future__ import unicode_literals
-from urllib import unquote
-
-from django.contrib.gis.db.models import GeometryField
-from django.contrib.gis.geos import GEOSGeometry
-
-import json
-
-from tastypie.fields import ApiField, CharField
-from tastypie import resources
-
-
-class GeometryApiField(ApiField):
- """
- Custom ApiField for dealing with data from GeometryFields (by serializing
- them as GeoJSON).
- """
- dehydrated_type = 'geometry'
- help_text = 'Geometry data.'
-
- def hydrate(self, bundle):
- value = super(GeometryApiField, self).hydrate(bundle)
- if value is None:
- return value
- return json.dumps(value)
-
- def dehydrate(self, obj, for_list=False):
- return self.convert(super(GeometryApiField, self).dehydrate(obj))
-
- def convert(self, value):
- if value is None:
- return None
-
- if isinstance(value, dict):
- return value
-
- # Get ready-made geojson serialization and then convert it _back_ to
- # a Python object so that tastypie can serialize it as part of the
- # bundle.
- return json.loads(value.geojson)
-
-
-class ModelResource(resources.ModelResource):
- """
- ModelResource subclass that handles geometry fields as GeoJSON.
- """
- @classmethod
- def api_field_from_django_field(cls, f, default=CharField):
- """
- Overrides default field handling to support custom GeometryApiField.
- """
- if isinstance(f, GeometryField):
- return GeometryApiField
-
- return super(ModelResource, cls).api_field_from_django_field(f, default)
-
- def filter_value_to_python(self, value, field_name, filters, filter_expr,
- filter_type):
- value = super(ModelResource, self).filter_value_to_python(
- value, field_name, filters, filter_expr, filter_type)
-
- # If we are filtering on a GeometryApiField then we should try
- # and convert this to a GEOSGeometry object. The conversion
- # will fail if we don't have valid JSON, so in that case we'll
- # just return ``value`` as normal.
- if isinstance(self.fields[field_name], GeometryApiField):
- try:
- value = GEOSGeometry(unquote(value))
- except ValueError:
- pass
- return value
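
The GeoDjango-aware ``ModelResource`` is a drop-in replacement for the stock
one; a minimal sketch, assuming a hypothetical ``Place`` model with a
``PointField``::

    from tastypie.contrib.gis.resources import ModelResource

    class PlaceResource(ModelResource):
        class Meta:
            queryset = Place.objects.all()
            # The geometry column is detected automatically and serialized
            # as GeoJSON via GeometryApiField.
            resource_name = 'place'
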
diff --git a/python-packages/tastypie/exceptions.py b/python-packages/tastypie/exceptions.py
deleted file mode 100644
index fdd90cc8b2..0000000000
--- a/python-packages/tastypie/exceptions.py
+++ /dev/null
@@ -1,101 +0,0 @@
-from __future__ import unicode_literals
-from django.http import HttpResponse
-
-
-class TastypieError(Exception):
- """A base exception for other tastypie-related errors."""
- pass
-
-
-class HydrationError(TastypieError):
- """Raised when there is an error hydrating data."""
- pass
-
-
-class NotRegistered(TastypieError):
- """
- Raised when the requested resource isn't registered with the ``Api`` class.
- """
- pass
-
-
-class NotFound(TastypieError):
- """
- Raised when the resource/object in question can't be found.
- """
- pass
-
-
-class Unauthorized(TastypieError):
- """
- Raised when the request object is not accessible to the user.
-
- This is different from ``tastypie.http.HttpUnauthorized`` and is handled
- differently internally.
- """
- pass
-
-
-class ApiFieldError(TastypieError):
- """
- Raised when there is a configuration error with an ``ApiField``.
- """
- pass
-
-
-class UnsupportedFormat(TastypieError):
- """
- Raised when an unsupported serialization format is requested.
- """
- pass
-
-
-class BadRequest(TastypieError):
- """
- A generalized exception for indicating incorrect request parameters.
-
- Handled specially in that the message tossed by this exception will be
- presented to the end user.
- """
- pass
-
-
-class BlueberryFillingFound(TastypieError):
- pass
-
-
-class InvalidFilterError(BadRequest):
- """
- Raised when the end user attempts to use a filter that has not been
- explicitly allowed.
- """
- pass
-
-
-class InvalidSortError(BadRequest):
- """
- Raised when the end user attempts to sort on a field that has not been
- explicitly allowed.
- """
- pass
-
-
-class ImmediateHttpResponse(TastypieError):
- """
- This exception is used to interrupt the flow of processing to immediately
- return a custom HttpResponse.
-
- Common uses include:
-
- * for authentication (like digest/OAuth)
- * for throttling
-
- """
- _response = HttpResponse("Nothing provided.")
-
- def __init__(self, response):
- self._response = response
-
- @property
- def response(self):
- return self._response
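
``ImmediateHttpResponse`` short-circuits request processing from anywhere in
a resource; a minimal sketch inside a hypothetical hook, where the guard
condition is invented for illustration::

    from tastypie import http
    from tastypie.exceptions import ImmediateHttpResponse
    from tastypie.resources import ModelResource

    class NoteResource(ModelResource):
        def obj_create(self, bundle, **kwargs):
            # hypothetical guard condition
            if bundle.request.META.get('HTTP_X_DENY'):
                raise ImmediateHttpResponse(response=http.HttpUnauthorized())
            return super(NoteResource, self).obj_create(bundle, **kwargs)
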
diff --git a/python-packages/tastypie/fields.py b/python-packages/tastypie/fields.py
deleted file mode 100644
index bdbb36a11c..0000000000
--- a/python-packages/tastypie/fields.py
+++ /dev/null
@@ -1,901 +0,0 @@
-from __future__ import unicode_literals
-import datetime
-from dateutil.parser import parse
-from decimal import Decimal
-import re
-from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
-from django.utils import datetime_safe, importlib
-from django.utils import six
-from tastypie.bundle import Bundle
-from tastypie.exceptions import ApiFieldError, NotFound
-from tastypie.utils import dict_strip_unicode_keys, make_aware
-
-
-class NOT_PROVIDED:
- def __str__(self):
- return 'No default provided.'
-
-
-DATE_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2}).*?$')
-DATETIME_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})(T|\s+)(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}).*?$')