diff --git a/Procfile b/Procfile index c26b927d9..7ed0f14eb 100644 --- a/Procfile +++ b/Procfile @@ -1 +1 @@ -web: aspen --network_address=:$PORT --www_root=doc/ --project_root=doc/.aspen +web: python -m aspen --www_root=doc/ --project_root=doc/.aspen diff --git a/aspen/__init__.py b/aspen/__init__.py index 183e3a716..39a66e48d 100644 --- a/aspen/__init__.py +++ b/aspen/__init__.py @@ -77,10 +77,6 @@ dist = pkg_resources.get_distribution('aspen') __version__ = dist.version WINDOWS = sys.platform[:3] == 'win' -NETWORK_ENGINES = ['cheroot'] - -for entrypoint in pkg_resources.iter_entry_points(group='aspen.network_engines'): - NETWORK_ENGINES.append(entrypoint.name) BUILTIN_RENDERERS = [ 'stdlib_format' , 'stdlib_percent' diff --git a/aspen/__main__.py b/aspen/__main__.py index a59084a88..b0156999d 100644 --- a/aspen/__main__.py +++ b/aspen/__main__.py @@ -16,11 +16,18 @@ a higher performance WSGI server like Gunicorn, uwsgi, Spawning, or the like. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals +from aspen import log_dammit from aspen.website import Website from wsgiref.simple_server import make_server - if __name__ == '__main__': - make_server('0.0.0.0', 8080, Website()).serve_forever() + website = Website() + server = make_server('0.0.0.0', 8080, website) + log_dammit("Greetings, program! Welcome to port 8080.") + server.serve_forever() diff --git a/aspen/algorithms/server.py b/aspen/algorithms/server.py deleted file mode 100644 index 262a8d3d9..000000000 --- a/aspen/algorithms/server.py +++ /dev/null @@ -1,160 +0,0 @@ -""" -aspen.algorithms.server -~~~~~~~~~~~~~~~~~~~~~~~ -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - - -import os -import signal -import socket -import sys -import traceback - -import aspen -from aspen import execution -from aspen.website import Website - - -def install_handler_for_SIGHUP(): - """ - """ - def SIGHUP(signum, frame): - aspen.log_dammit("Received HUP, re-executing.") - execution.execute() - if not aspen.WINDOWS: - signal.signal(signal.SIGHUP, SIGHUP) - - -def install_handler_for_SIGINT(): - """ - """ - def SIGINT(signum, frame): - aspen.log_dammit("Received INT, exiting.") - raise SystemExit - signal.signal(signal.SIGINT, SIGINT) - - -def install_handler_for_SIGQUIT(): - """ - """ - def SIGQUIT(signum, frame): - aspen.log_dammit("Received QUIT, exiting.") - raise SystemExit - if not aspen.WINDOWS: - signal.signal(signal.SIGQUIT, SIGQUIT) - - -def get_website_from_argv(argv, algorithm): - """ - - User-developers get this website object inside of their resources and - hooks. It provides access to configuration information in addition to being - a WSGI callable and holding the request/response handling logic. See - aspen/website.py - - """ - if argv is None: - argv = sys.argv[1:] - website = Website(argv, server_algorithm=algorithm) - return {'website': website} - - -def bind_server_to_port(website): - """ - """ - if hasattr(socket, 'AF_UNIX'): - if website.network_sockfam == socket.AF_UNIX: - if os.path.exists(website.network_address): - aspen.log("Removing stale socket.") - os.remove(website.network_address) - if website.network_port is not None: - welcome = "port %d" % website.network_port - else: - welcome = website.network_address - aspen.log("Starting %s engine." 
% website.network_engine.name) - website.network_engine.bind() - aspen.log_dammit("Greetings, program! Welcome to %s." % welcome) - - -def install_restarter_for_website(website): - """ - """ - if website.changes_reload: - aspen.log("Aspen will restart when configuration scripts or " - "Python modules change.") - execution.install(website) - - -def start(website): - """ - """ - aspen.log_dammit("Starting up Aspen website.") - website.network_engine.start() - - -def stub_website_for_exception(exception, state): - """ - """ - # Without this, the call to stop fails if we had an exception during configuration. - if 'website' not in state: - return {'website': None} - - -def handle_conflict_over_port(exception, website): - """Be friendly about port conflicts. - - The traceback one gets from a port conflict or permission error is not that - friendly. Here's a helper to let the user know (in color?!) that a port - conflict or a permission error is probably the problem. But in case it - isn't (website.start fires the start hook, and maybe the user tries to - connect to a network service in there?), don't fully swallow the exception. - Also, be explicit about the port number. What if they have logging turned - off? Then they won't see the port number in the "Greetings, program!" line. - They definitely won't see it if using an engine like eventlet that binds to - the port early. - - """ - if exception.__class__ is not socket.error: - return - - if website.network_port is not None: - msg = "Is something already running on port %s? Because ..." - if not aspen.WINDOWS: - if website.network_port < 1024: - if os.geteuid() > 0: - msg = ("Do you have permission to bind to port %s?" - " Because ...") - msg %= website.network_port - if not aspen.WINDOWS: - # Assume we can use ANSI color escapes if not on Windows. - # XXX Maybe a bad assumption if this is going into a log - # file? See also: colorama - msg = '\033[01;33m%s\033[00m' % msg - aspen.log_dammit(msg) - raise - - -def log_traceback_for_exception(exception): - """ - """ - if exception.__class__ not in (KeyboardInterrupt, SystemExit): - aspen.log_dammit(traceback.format_exc()) - return {'exception': None} - - -def stop(website): - """Stop the server. - """ - if website is None: - return - - aspen.log_dammit("Shutting down Aspen website.") - website.network_engine.stop() - if hasattr(socket, 'AF_UNIX'): - if website.network_sockfam == socket.AF_UNIX: - if os.path.exists(website.network_address): - os.remove(website.network_address) diff --git a/aspen/algorithms/website.py b/aspen/algorithms/website.py index 1e8091273..b7dcc1dfa 100644 --- a/aspen/algorithms/website.py +++ b/aspen/algorithms/website.py @@ -36,10 +36,9 @@ import traceback import aspen -from aspen import dispatcher, resources, sockets +from aspen import dispatcher, resources from aspen.http.request import Request from aspen.http.response import Response -from aspen.sockets.socket import Socket from aspen import typecasting from first import first as _first @@ -67,24 +66,8 @@ def apply_typecasters_to_path(website, request): typecasting.apply_typecasters(website.typecasters, request.line.uri.path) -def get_response_for_socket(request): - socket = sockets.get(request) - if socket is None: - # This is not a socket request. - response = None - elif isinstance(socket, Response): - # Actually, this is a handshake request. - response = socket - else: - assert isinstance(socket, Socket) # sanity check - # This is a socket ... request? 
- response = socket.respond(request) - return {'response': response} - - -def get_resource_for_request(request, response): - if response is None: - return {'resource': resources.get(request)} +def get_resource_for_request(request): + return {'resource': resources.get(request)} def get_response_for_resource(request, resource=None): diff --git a/aspen/configuration/__init__.py b/aspen/configuration/__init__.py index f579a0ad8..fb5032489 100644 --- a/aspen/configuration/__init__.py +++ b/aspen/configuration/__init__.py @@ -12,7 +12,6 @@ import errno import mimetypes import os -import socket import sys import traceback import pkg_resources @@ -20,7 +19,6 @@ import aspen import aspen.logging -from aspen import execution from aspen.configuration import parse from aspen.configuration.exceptions import ConfigurationError from aspen.configuration.options import OptionParser, DEFAULT @@ -35,10 +33,6 @@ KNOBS = \ { 'configuration_scripts': (lambda: [], parse.list_) - , 'network_engine': ('cheroot', parse.network_engine) - , 'network_address': ( (('0.0.0.0', 8080), socket.AF_INET) - , parse.network_address - ) , 'project_root': (None, parse.identity) , 'logging_threshold': (0, int) , 'www_root': (None, parse.identity) @@ -256,7 +250,6 @@ def safe_getcwd(errorstr): "or --www_root on the command line.") self.www_root = os.path.realpath(self.www_root) - os.chdir(self.www_root) # load renderers self.renderer_factories = {} @@ -290,40 +283,6 @@ def safe_getcwd(errorstr): if not mimetypes.inited: mimetypes.init() - # network_engine - - ## Load modules - ENGINES = {} - for entrypoint in pkg_resources.iter_entry_points(group='aspen.network_engines'): - ENGINES[entrypoint.name] = entrypoint.load() - - if self.network_engine in ENGINES: - # found in a module - Engine = ENGINES[self.network_engine].Engine - else: - # look for a built-in one - try: - capture = {} - python_syntax = 'from aspen.network_engines.%s_ import Engine' - exec python_syntax % self.network_engine in capture - Engine = capture['Engine'] - except ImportError: - # ANSI colors: - # http://stackoverflow.com/questions/287871/ - # http://en.wikipedia.org/wiki/ANSI_escape_code#CSI_codes - # XXX consider http://pypi.python.org/pypi/colorama - msg = "\033[1;31mImportError loading the %s network engine:\033[0m" - aspen.log_dammit(msg % self.network_engine) - raise - self.network_engine = Engine(self.network_engine, self) - - # network_address, network_sockfam, network_port - self.network_address, self.network_sockfam = self.network_address - if self.network_sockfam == socket.AF_INET: - self.network_port = self.network_address[1] - else: - self.network_port = None - self.run_config_scripts() self.show_renderers() @@ -399,9 +358,6 @@ def run_config_scripts(self): else: # problems with default config files are okay, but get logged aspen.log(msg) - else: - aspen.log_dammit("Loading configuration file '%s' (possibly changing settings)" % filepath) - execution.if_changes(filepath) diff --git a/aspen/configuration/mime.types b/aspen/configuration/mime.types index 7a2de6cf8..06e5cde82 100644 --- a/aspen/configuration/mime.types +++ b/aspen/configuration/mime.types @@ -1,5 +1,4 @@ application/json json -application/x-socket.io sock image/x-icon ico text/plain py diff --git a/aspen/configuration/options.py b/aspen/configuration/options.py index 7f4ed7e1f..3bd6a5fee 100644 --- a/aspen/configuration/options.py +++ b/aspen/configuration/options.py @@ -53,18 +53,6 @@ def OptionParser(): "$ASPEN_PROJECT_ROOT/configure-aspen.py") , default=DEFAULT ) - 
basic.add_option( "-a", "--network_address" - , help=("the IPv4, IPv6, or Unix address to bind to " - "[0.0.0.0:8080]") - , default=DEFAULT - ) - basic.add_option( "-e", "--network_engine" - , help=( "the HTTP engine to use, one of " - + "{%s}" % ','.join(aspen.NETWORK_ENGINES) - + " [%s]" % aspen.NETWORK_ENGINES[0] - ) - , default=DEFAULT - ) basic.add_option( "-l", "--logging_threshold" , help=("a small integer; 1 will suppress most of aspen's " "internal logging, 2 will suppress all it [0]") diff --git a/aspen/configuration/parse.py b/aspen/configuration/parse.py index 093c5c567..23ecaee04 100644 --- a/aspen/configuration/parse.py +++ b/aspen/configuration/parse.py @@ -13,9 +13,6 @@ from __future__ import print_function from __future__ import unicode_literals -import os -import socket - import aspen from aspen.utils import typecheck from aspen.http.response import charset_re @@ -66,94 +63,9 @@ def list_(value): return (extend, out) -def network_engine(value): - typecheck(value, unicode) - if value not in aspen.NETWORK_ENGINES: - msg = "not one of {%s}" % (','.join(aspen.NETWORK_ENGINES)) - raise ValueError(msg) - return value - def renderer(value): typecheck(value, unicode) if value not in aspen.RENDERERS: msg = "not one of {%s}" % (','.join(aspen.RENDERERS)) raise ValueError(msg) return value.encode('US-ASCII') - -def network_address(address): - """Given a socket address string, return a tuple (sockfam, address). - - This is called from a couple places, and is a bit complex. - - """ - typecheck(address, unicode) - - if address[0] in (u'/', u'.'): - if aspen.WINDOWS: - raise ValueError("can't use an AF_UNIX socket on Windows") - # but what about named pipes? - sockfam = socket.AF_UNIX - # We could test to see if the path exists or is creatable, etc. - address = os.path.realpath(address) - - elif address.count(u':') > 1: - sockfam = socket.AF_INET6 - # @@: validate this, eh? - - else: - sockfam = socket.AF_INET - # Here we need a tuple: (str, int). The string must be a valid - # IPv4 address or the empty string, and the int -- the port -- - # must be between 0 and 65535, inclusive. - - - # Break out IP and port. - # ====================== - - if address.count(u':') != 1: - raise ValueError("Wrong number of :'s. Should be exactly 1") - ip_port = address.split(u':') - ip, port = [i.strip() for i in ip_port] - - - # IP - # == - - if ip == u'': - ip = u'0.0.0.0' # IP defaults to INADDR_ANY for AF_INET; specified - # explicitly to avoid accidentally binding to - # INADDR_ANY for AF_INET6. - elif ip == u'localhost': - ip = u'127.0.0.1' # special case for nicer user experience - else: - try: - # socket.inet_aton is more permissive than we'd like - parts = ip.split('.') - assert len(parts) == 4 - for p in parts: - assert p.isdigit() - assert 0 <= int(p) <= 255 - except AssertionError: - raise ValueError("invalid IP") - - - # port - # ==== - # Coerce to int. Must be between 0 and 65535, inclusive. - - try: - port = int(port) - except ValueError: - raise ValueError("invalid port (non-numeric)") - - if not(0 <= port <= 65535): - raise ValueError("invalid port (out of range)") - - - # Success! 
- # ======== - - address = (ip, port) - - - return address, sockfam diff --git a/aspen/context.py b/aspen/context.py index 41eba4367..47eb75c5d 100644 --- a/aspen/context.py +++ b/aspen/context.py @@ -22,7 +22,6 @@ def __init__(self, request): self.path = request.line.uri.path self.qs = request.line.uri.querystring self.request = request - self.socket = None self.channel = None self.context = self diff --git a/aspen/dispatcher.py b/aspen/dispatcher.py index 2af7759fe..f90410061 100644 --- a/aspen/dispatcher.py +++ b/aspen/dispatcher.py @@ -213,25 +213,6 @@ def dispatch_abstract(listnodes, is_leaf, traverse, find_index, noext_matched, , "Found." ) - -def extract_socket_info(path): - """Given a request object, return a tuple of (str, None) or (str, str). - - Intercept socket requests. We modify the filesystem path so that your - application thinks the request was to /foo.sock instead of to - /foo.sock/blah/blah/blah/. - - """ - if path.endswith('.sock'): - # request path does not include 'querystring'. - raise Response(404) - socket = None - parts = path.rsplit('.sock/', 1) - if len(parts) > 1: - path = parts[0] + '.sock' - socket = parts[1] - return path, socket - def match_index(indices, indir): for filename in indices: index = os.path.join(indir, filename) @@ -259,15 +240,10 @@ def dispatch(request, pure_dispatch=False): This is all side-effecty on the request object, setting, at the least, request.fs, and at worst other random contents including but not limited - to: request.line.uri.path, request.headers, request.socket + to: request.line.uri.path, request.headers. """ - # Handle websockets. - # ================== - - request.line.uri.path.decoded, request.socket = extract_socket_info(request.line.uri.path.decoded) - # Handle URI path parts pathparts = request.line.uri.path.parts diff --git a/aspen/execution.py b/aspen/execution.py deleted file mode 100644 index a68dfb931..000000000 --- a/aspen/execution.py +++ /dev/null @@ -1,168 +0,0 @@ -""" -aspen.execution -+++++++++++++++ - -Implement re-execution of the aspen process. - -When files change on the filesystem or we receive HUP, we want to re-execute -ourselves. - -For thoughts on a more sophisticated approach, see: - - http://sync.in/aspen-reloading - -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import os -import sys - -import aspen - - -extras = set() -mtimes = {} - - -############################################################################### -# Thanks, Bob. :) ############################################################# -# https://bitbucket.org/cherrypy/magicbus/src/41f5dfb95479/magicbus/wspbus.py # - - -# Here he saves the value of os.getcwd(), which, if he is imported early -# enough, will be the directory from which the startup script was run. This is -# needed by _do_execv(), to change back to the original directory before -# execv()ing a new process. This is a defense against the application having -# changed the current working directory (which could make sys.executable "not -# found" if sys.executable is a relative-path, and/or cause other problems). -_startup_cwd = os.getcwd() - - -try: - import fcntl -except ImportError: - max_cloexec_files = 0 -else: - try: - # The __future__ import upgrades everything to unicode, - # but os.sysconf requires a str argument. 
- max_cloexec_files = os.sysconf(str('SC_OPEN_MAX')) - except AttributeError: - max_cloexec_files = 1024 - - -def _do_execv(): - """Re-execute the current process. - - This must be called from the main thread, because certain platforms - (OS X) don't allow execv to be called in a child thread very well. - - """ - args = sys.argv[:] - aspen.log_dammit("Re-executing %s." % ' '.join(args)) - - if sys.platform[:4] == 'java': - from _systemrestart import SystemRestart - raise SystemRestart - else: - args.insert(0, sys.executable) - if sys.platform == 'win32': - args = ['"%s"' % arg for arg in args] - - os.chdir(_startup_cwd) - if max_cloexec_files: - _set_cloexec() - os.execv(sys.executable, args) - - -def _set_cloexec(): - """Set the CLOEXEC flag on all open files (except stdin/out/err). - - If self.max_cloexec_files is an integer (the default), then on - platforms which support it, it represents the max open files setting - for the operating system. This function will be called just before - the process is restarted via os.execv() to prevent open files - from persisting into the new process. - - Set self.max_cloexec_files to 0 to disable this behavior. - - """ - for fd in range(3, max_cloexec_files): # skip stdin/out/err - try: - flags = fcntl.fcntl(fd, fcntl.F_GETFD) - except IOError: - continue - fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) - -# -############################################################################### - - -execute = _do_execv - -def clear_changes(): - global extras - extras = set() - -def if_changes(filename): - extras.add(filename) - - -def check_one(filename): - """Given a filename, return None or restart. - """ - - # The file may have been removed from the filesystem. - # =================================================== - - if not os.path.isfile(filename): - if filename in mtimes: - aspen.log("File deleted: %s" % filename) - execute() - else: - # We haven't seen the file before. It has probably been loaded - # from a zip (egg) archive. - return - - - # Or not, in which case, check the modification time. - # =================================================== - - mtime = os.stat(filename).st_mtime - if filename not in mtimes: # first time we've seen it - mtimes[filename] = mtime - if mtime > mtimes[filename]: - aspen.log("File changed: %s" % filename) - execute() - - -def check_all(): - """See if any of our available modules have changed on the filesystem. - """ - for name, module in sorted(sys.modules.items()): # module files - filepath = getattr(module, '__file__', None) - if filepath is None: - # We land here when a module is an attribute of another module - # i.e., it exists twice in the sys.modules table, once as its - # canonical representation, and again having been imported - # within another module. - continue - filepath = filepath.endswith(".pyc") and filepath[:-1] or filepath - check_one(filepath) - - for filepath in extras: # additional files - check_one(filepath) - - -# Setup -# ===== - -def install(website): - """Given a Website instance, start a loop over check_all. - """ - for script_path in website.configuration_scripts: - if_changes(script_path) - website.network_engine.start_checking(check_all) diff --git a/aspen/http/request.py b/aspen/http/request.py index 36af2c628..ebb53021e 100644 --- a/aspen/http/request.py +++ b/aspen/http/request.py @@ -186,7 +186,6 @@ class Request(str): """Represent an HTTP Request message. It's bytes, dammit. But lazy. 
""" - socket = None resource = None original_resource = None server_software = '' diff --git a/aspen/http/response.py b/aspen/http/response.py index 24cd393b0..77779465b 100644 --- a/aspen/http/response.py +++ b/aspen/http/response.py @@ -29,11 +29,8 @@ def __iter__(self): return iter(self.body) def close(self): - socket = getattr(self.request, "socket", None) - if socket is not None: - pass - # implement some socket closing logic here - #self.request.socket.close() + # No longer using this since we ripped out Socket.IO support. + pass # Define a charset name filter. diff --git a/aspen/network_engines/__init__.py b/aspen/network_engines/__init__.py deleted file mode 100644 index dd938b400..000000000 --- a/aspen/network_engines/__init__.py +++ /dev/null @@ -1,88 +0,0 @@ -""" -aspen.network_engines -+++++++++++++++++++++ - -Implement an "engine" abstraction for network access. - -This submodule contains adapters for network I/O. These come in two basic -flavors: threaded and evented. The Buffer object is part of Aspen's Socket.IO -implementation. - -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import time - -from aspen.sockets.buffer import ThreadedBuffer -from aspen.sockets.loop import ThreadedLoop - - -class BaseEngine(object): - - def __init__(self, name, website): - """Takes an identifying string and a WSGI application. - """ - self.name = name - self.website = website - - def bind(self): - """Bind to a socket, based on website.sockfam and website.address. - """ - - def start(self): - """Start listening on the socket. - """ - - def stop(self): - """Stop listening on the socket. - """ - - def start_checking(self, check_all): - """Start a loop that runs check_all every half-second. - """ - - def stop_checking(self): - """Stop the loop that runs check_all (optional). - """ - - -# Threaded -# ======== - -class ThreadedEngine(BaseEngine): - """An engine that uses threads for concurrent persistent sockets. - """ - - def sleep(self, seconds): - time.sleep(seconds) - - Buffer = ThreadedBuffer - Loop = ThreadedLoop - - -# Cooperative -# =========== - -class CooperativeEngine(BaseEngine): - """An engine that assumes cooperative scheduling for persistent sockets. - """ - - def start_socket_loop(self, socket): - """Given a Socket object, start it's main loop. - - The expectation here is that the buffer implementation in use will take - care of cooperative scheduling. So when someone calls socket.recv() in - one of their socket resources, that will block for them but in a - cooperative way. - - """ - socket.loop() - return None - - def sleep(self, seconds): - raise NotImplementedError - - Buffer = NotImplemented diff --git a/aspen/network_engines/cheroot_.py b/aspen/network_engines/cheroot_.py deleted file mode 100644 index 73202a128..000000000 --- a/aspen/network_engines/cheroot_.py +++ /dev/null @@ -1,46 +0,0 @@ -""" -aspen.network_engines.cheroot_ -++++++++++++++++++++++++++++++ -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import time -import threading - -import cheroot.wsgi -from aspen.network_engines import ThreadedEngine - - -class Engine(ThreadedEngine): - - cheroot_server = None - - def bind(self): - name = "Aspen! Cheroot!" 
- self.cheroot_server = cheroot.wsgi.WSGIServer( self.website.network_address - , server_name=name - , wsgi_app=self.website - ) - - def start(self): - self.cheroot_server.start() - - def stop(self): - self.cheroot_server.stop() - - def start_checking(self, check_all): - - def loop(): - while True: - try: - check_all() - except SystemExit: - self.cheroot_server.interrupt = SystemExit - time.sleep(0.5) - - checker = threading.Thread(target=loop) - checker.daemon = True - checker.start() diff --git a/aspen/network_engines/gevent_.py b/aspen/network_engines/gevent_.py deleted file mode 100644 index e4bc48e0b..000000000 --- a/aspen/network_engines/gevent_.py +++ /dev/null @@ -1,163 +0,0 @@ -""" -aspen.network_engines.gevent_ -+++++++++++++++++++++++++++++ -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import time - -import gevent -import gevent.socket -import gevent.queue -import gevent.wsgi -from aspen.network_engines import CooperativeEngine -from aspen.sockets import packet -from aspen.sockets.loop import Die - - -class GeventBuffer(gevent.queue.Queue): - """Model a buffer of items. - - There are two of these for each Socket, one for incoming message payloads - and one for outgoing message objects. - - Here's what the flow looks like: - - wire => [msg, msg, msg, msg, msg, msg, msg, msg] => resource - wire <= [msg, msg, msg, msg, msg, msg, msg, msg] <= resource - - """ - - def __init__(self, name, socket=None): - """Takes a string and maybe a socket. - - If given a socket, we will try to play nice with its loop. - - """ - gevent.queue.Queue.__init__(self) - self._socket = socket - self._name = name - - - # flush - # ===== - # Used for outgoing buffer. - - def flush(self): - """Return an iterable of bytestrings or None. - """ - if not self.empty(): - return self.__flusher() - return None - - def __flusher(self): - """Yield strings. - - We unload bytestrings as fast as we can until we run out of time or - bytestrings. On my MacBook Pro I am seeing between 500 and 1000 - messages dumped in 2ms--without any WSGI/HTTP/TCP overhead. We always - yield at least one bytestring to avoid deadlock. - - This generator is instantiated in self.flush. - - """ - if not self.empty(): - yield packet.frame(self.get()) - timeout = time.time() + (0.007) # We have 7ms to dump bytestrings. Go! - while not self.empty() and time.time() < timeout: - yield packet.frame(self.get()) - - - # next - # ==== - # Used for incoming buffer. - - def next(self): - """Return the next item from the queue. - - The first time this is called, we lazily instantiate the generator at - self._blocked. Subsequent calls are directed directly to that - generator's next method. - - """ - self._blocked = self._blocked() - self.next = self._next - return self.next() - - def _next(self): - try: - return self._blocked.next() - except StopIteration: - # When the _blocked generator discovers Die and breaks, the - # effect is a StopIteration here. It's a bug if this happens - # other than when we are disconnecting the socket. - assert self._socket is not None - assert self._socket.loop.please_stop - - def _blocked(self): - """Yield items from self forever. - - This generator is lazily instantiated in self.next. It is designed to - cooperate with ThreadedLoop. - - """ - if self._socket is None: # We're on a Channel. - while 1: - yield self.get() - else: # We're on a Socket. 
- while not self._socket.loop.please_stop: - out = self.get() - if out is Die: - break # will result in a StopIteration - yield out - - -class GeventLoop(object): - - def __init__(self, socket): - self.socket = socket - self.please_stop = False - self.greenthread = None - - def __call__(self): - while not self.please_stop: - self.socket.tick() - - def start(self): - self.greenthread = gevent.spawn(self) - - def stop(self): - self.please_stop = True - self.socket.incoming.put(Die) - self.greenthread.wait() - - -class Engine(CooperativeEngine): - - wsgi_server = None # a WSGI server, per gevent - - def bind(self): - self.gevent_server = gevent.wsgi.WSGIServer( listener=self.website.network_address - , application=self.website - , log=None - ) - - def sleep(self, seconds): - gevent.sleep(seconds) - - def start(self): - self.gevent_server.serve_forever() - - def start_checking(self, check_all): - def loop(): - while True: - check_all() - self.sleep(0.5) - gevent.spawn(loop) - - Buffer = GeventBuffer - Loop = GeventLoop - diff --git a/aspen/resources/__init__.py b/aspen/resources/__init__.py index 4f84cea91..87970a453 100644 --- a/aspen/resources/__init__.py +++ b/aspen/resources/__init__.py @@ -10,7 +10,6 @@ +-- DynamicResource ----------------------------- | +-- NegotiatedResource 2 1 or more | | +-- RenderedResource 1 or 2 1 - | +-- SocketResource 1, 2, or 3 0 +-- StaticResource 0 1 @@ -43,7 +42,6 @@ from aspen.exceptions import LoadError from aspen.resources.negotiated_resource import NegotiatedResource from aspen.resources.rendered_resource import RenderedResource -from aspen.resources.socket_resource import SocketResource from aspen.resources.static_resource import StaticResource # Cache helpers @@ -149,8 +147,6 @@ def load(request, mtime): if not is_spt: # static Class = StaticResource - elif media_type == 'application/x-socket.io': # socket - Class = SocketResource elif '.' in os.path.basename(guess_with): # rendered Class = RenderedResource else: # negotiated diff --git a/aspen/resources/dynamic_resource.py b/aspen/resources/dynamic_resource.py index 6d9a51730..87b9d29aa 100644 --- a/aspen/resources/dynamic_resource.py +++ b/aspen/resources/dynamic_resource.py @@ -25,7 +25,7 @@ def __getitem__(self, key): class DynamicResource(Resource): - """This is the base for JSON, negotiating, socket, and rendered resources. + """This is the base for negotiating and rendered resources. """ min_pages = None # set on subclass diff --git a/aspen/resources/socket_resource.py b/aspen/resources/socket_resource.py deleted file mode 100644 index 1cbb9af53..000000000 --- a/aspen/resources/socket_resource.py +++ /dev/null @@ -1,51 +0,0 @@ -""" -aspen.resources.socket_resource -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Aspen supports Socket.IO sockets. http://socket.io/ -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - - -from aspen.resources.dynamic_resource import DynamicResource - - -class SocketResource(DynamicResource): - - min_pages = 1 - max_pages = 4 - - def respond(self): - """Override and kill it. For sockets the Socket object responds. - """ - raise NotImplemented - - def parse_into_pages(self, raw): - """Extend to add empty pages to the front if there are less than three. - """ - pages = DynamicResource.parse_into_pages(self, raw) - self._prepend_empty_pages(pages, 3) - return pages - - def compile_page(self, page): - """Given two bytestrings, return a code object. 
- - This method depends on self.fs. - - """ - # See DynamicResource.compile_pages for an explanation of this - # algorithm. - return compile(page.padded_content, self.fs, 'exec') - - def exec_second(self, socket, request): - """Given a Request, return a context dictionary. - """ - context = request.context - context.update(self.pages[0]) - context['socket'] = socket - context['channel'] = socket.channel - exec self.pages[1] in context - return context diff --git a/aspen/server.py b/aspen/server.py deleted file mode 100644 index 42142d79d..000000000 --- a/aspen/server.py +++ /dev/null @@ -1,76 +0,0 @@ -""" -aspen.server -++++++++++++ -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - - -import sys -from algorithm import Algorithm - - -def main(): - Server().main() - - -class Server(object): - - def __init__(self, argv=None): - self.argv = argv - - def get_algorithm(self): - return Algorithm.from_dotted_name('aspen.algorithms.server') - - def get_website(self, silent=True): - """Return a website object. Useful in testing. - """ - def work(): - algorithm = self.get_algorithm() - state = algorithm.run(argv=self.argv, _return_after='get_website_from_argv') - return state['website'] - - if not silent: - return work() - else: - class DevNull(): - def write(self, *a, **kw): pass - def flush(self, *a, **kw): pass - devnull = DevNull() - - try: - sys.stdout = devnull - website = work() - finally: - sys.stdout = sys.__stdout__ - - return website - - - def main(self, argv=None): - """http://aspen.io/cli/ - """ - try: - argv = argv if argv is not None else self.argv - algorithm = self.get_algorithm() - algorithm.run(argv=argv) - except (SystemExit, KeyboardInterrupt): - - # Under some (most?) network engines, a SIGINT will be trapped by the - # SIGINT signal handler above. However, gevent does "something" with - # signals and our signal handler never fires. However, we *do* get a - # KeyboardInterrupt here in that case. *shrug* - # - # See: https://github.com/gittip/aspen-python/issues/196 - - pass - except: - import aspen, traceback - aspen.log_dammit("Oh no! Aspen crashed!") - aspen.log_dammit(traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/aspen/sockets/__init__.py b/aspen/sockets/__init__.py deleted file mode 100644 index 2eea104f4..000000000 --- a/aspen/sockets/__init__.py +++ /dev/null @@ -1,155 +0,0 @@ -""" -aspen.sockets -+++++++++++++ - -Implement the server side of Socket.IO. - - https://github.com/learnboost/socket.io-spec - -Ah, abstraction! This is a whole convoluted mess to provide some pretty nice -API inside socket resources. 
Here are the objects involved on the server side, -from the inside out: - - Message a Socket.IO message, a colon-delimited set of bytestrings - Packet a Socket.IO packet, a message or series of framed messages - Buffer a Socket.IO buffer, buffers incoming and outgoing messages - Loop an object responsible for repeatedly calling socket.tick - Socket a Socket.IO socket, maintains state - Channel an object that represents all connections to a single Resource - Transport a Socket.IO transport mechanism, does HTTP work - Resource an HTTP resource, a file on your filesystem, application logic - Response an HTTP Response message - Request an HTTP Request message - - Engine fits somewhere, handles networking implementation; Buffer and - Loop attributes point to implementations of the above - - -A specially-crafted HTTP request creates a new Socket. That socket object -exists until one of these conditions is met: - - - the application explicitly disconnects - - the client explicitly disconnects - - the client disappears (for some definition of "disappears") - -A second specially-crafted HTTP request negotiates a Transport. Subsequent -specially-crafted HTTP requests are marshalled into socket reads and writes -according to the Transport negotiated. - -The Loop object is responsible for running socket.tick until it is told to stop -(as a result of one of the above three conditions). socket.tick exec's the -third page of the application's socket resource in question. This code is -expected to block. For ThreadedLoop that means we can't stop the loop inside of -native code. The ThreadedBuffer object cooperates with ThreadedLoop, so if your -application only ever blocks on socket.recv then you are okay. CooperativeLoops -should be immediately terminable assuming your application and its dependencies -cooperate ;-). - -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -from aspen import Response - -FFFD = u'\ufffd'.encode('utf-8') -HEARTBEAT = 15 -TIMEOUT = 10 -TRANSPORTS = ['xhr-polling'] - -from aspen.sockets.channel import Channel -from aspen.sockets.socket import Socket -from aspen.sockets.transport import XHRPollingTransport - - -__sockets__ = {} -__channels__ = {} - - -def get(request): - """Takes a Request object and returns a Response or Transport object. - - When we get the request it has socket set to a string, the path part after - *.sock, which is something like 1/websocket/43ef6fe7?foo=bar. - - 1 protocol (we only support 1) - websocket transport - 43ef6fe7 socket id (sid) - ?foo=bar querystring - - The Socket.IO handshake is a GET request to 1/. We return Response for the - handshake. After the handshake, subsequent messages are to the full URL as - above. We return a Transported instance for actual messages. - - """ - - # Exit early. - # =========== - - if request.socket is None: - return None - - - # Parse and validate the socket URL. - # ================================== - - parts = request.socket.split('/') - nparts = len(parts) - if nparts not in (2, 3): - msg = "Expected 2 or 3 path parts for Socket.IO socket, got %d." - raise Response(400, msg % nparts) - - protocol = parts[0] - if protocol != '1': - msg = "Expected Socket.IO protocol version 1, got %s." 
- raise Response(400, msg % protocol) - - - # Handshake - # ========= - - if len(parts) == 2: - - # Note that since we're indexing on URI path instead of fs path, - # wildcard socket simplates like %foo.sock will end up differentiated - # here. - - path = request.line.uri.path.raw - if path in __channels__: - channel = __channels__[path] - else: - channel = Channel(path, request.website.network_engine.Buffer) - __channels__[path] = channel - - socket = Socket(request, channel) - assert socket.sid not in __sockets__ # sanity check - __sockets__[socket.sid] = socket - socket.loop.start() - - return socket.shake_hands() # a Response - - - # More than a handshake. - # ====================== - - transport = parts[1] - sid = parts[2] - - if transport not in TRANSPORTS: - msg = "Expected transport in {%s}, got %s." - msg %= (",".join(TRANSPORTS), transport) - raise Response(400, msg) - - if sid not in __sockets__: - msg = "Expected %s in cache, didn't find it" - raise Response(400, msg % sid) - - if type(__sockets__[sid]) is Socket: - # This is the first request after a handshake. It's not until this - # point that we know what transport the client wants to use. - Transport = XHRPollingTransport # XXX derp - __sockets__[sid] = Transport(__sockets__[sid]) - - transport = __sockets__[sid] - return transport diff --git a/aspen/sockets/buffer.py b/aspen/sockets/buffer.py deleted file mode 100644 index 6d2ec9f21..000000000 --- a/aspen/sockets/buffer.py +++ /dev/null @@ -1,117 +0,0 @@ -""" -aspen.sockets.buffer -~~~~~~~~~~~~~~~~~~~~ -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import Queue -import sys -import threading -import time - -from aspen.sockets import packet -from aspen.sockets.loop import Die - - -if sys.version_info < (2, 6): # patch - threading._Event.is_set = threading._Event.isSet - - -class ThreadedBuffer(Queue.Queue): - """Model a buffer of items. - - There are two of these for each Socket, one for incoming message payloads - and one for outgoing message objects. - - Here's what the flow looks like: - - wire => [msg, msg, msg, msg, msg, msg, msg, msg] => resource - wire <= [msg, msg, msg, msg, msg, msg, msg, msg] <= resource - - """ - - def __init__(self, name, socket=None): - """Takes a string and maybe a socket. - - If given a socket, we will try to play nice with its loop. - - """ - Queue.Queue.__init__(self) - self._socket = socket - self._name = name - - - # flush - # ===== - # Used for outgoing buffer. - - def flush(self): - """Return an iterable of bytestrings or None. - """ - if self.queue: - return self.__flusher() - return None - - def __flusher(self): - """Yield strings. - - We unload bytestrings as fast as we can until we run out of time or - bytestrings. On my MacBook Pro I am seeing between 500 and 1000 - messages dumped in 2ms--without any WSGI/HTTP/TCP overhead. We always - yield at least one bytestring to avoid deadlock. - - This generator is instantiated in self.flush. - - """ - if self.queue: - yield packet.frame(self.get()) - timeout = time.time() + (0.007) # We have 7ms to dump bytestrings. Go! - while self.queue and time.time() < timeout: - yield packet.frame(self.get()) - - - # next - # ==== - # Used for incoming buffer. - - def next(self): - """Return the next item from the queue. - - The first time this is called, we lazily instantiate the generator at - self._blocked. Subsequent calls are directed directly to that - generator's next method. 
- - """ - self._blocked = self._blocked() - self.next = self._next - return self.next() - - def _next(self): - try: - return self._blocked.next() - except StopIteration: - # When the _blocked generator discovers Die and breaks, the - # effect is a StopIteration here. It's a bug if this happens - # other than when we are disconnecting the socket. - assert self._socket is not None - assert self._socket.loop.please_stop.is_set() - - def _blocked(self): - """Yield items from self forever. - - This generator is lazily instantiated in self.next. It is designed to - cooperate with ThreadedLoop. - - """ - if self._socket is None: # We're on a Channel. - while 1: - yield self.get() - else: # We're on a Socket. - while not self._socket.loop.please_stop.is_set(): - out = self.get() - if out is Die: - break # will result in a StopIteration - yield out diff --git a/aspen/sockets/channel.py b/aspen/sockets/channel.py deleted file mode 100644 index 0186d541d..000000000 --- a/aspen/sockets/channel.py +++ /dev/null @@ -1,48 +0,0 @@ -""" -aspen.sockets.channel -~~~~~~~~~~~~~~~~~~~~~ -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -class Channel(list): - """Model a pub/sub channel as a list of socket objects. - """ - - def __init__(self, name, Buffer): - """Takes a bytestring and Buffer class. - """ - self.name = name - self.incoming = Buffer('incoming') - - def add(self, socket): - """Override to check for sanity. - """ - assert socket not in self # sanity check - self.append(socket) - - def disconnect_all(self): - for i in range(len(self)): - self[i].disconnect() - - def send(self, data): - for socket in self: - socket.send(data) - - def send_event(self, data): - for socket in self: - socket.send_event(data) - - def send_json(self, data): - for socket in self: - socket.send_json(data) - - def send_utf8(self, data): - for socket in self: - socket.send_utf8(data) - - def notify(self, name, *args): - for socket in self: - socket.notify(name, *args) diff --git a/aspen/sockets/event.py b/aspen/sockets/event.py deleted file mode 100644 index 1bb1780e7..000000000 --- a/aspen/sockets/event.py +++ /dev/null @@ -1,18 +0,0 @@ -""" -aspen.sockets.event -~~~~~~~~~~~~~~~~~~~ -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -class Event(dict): - - def __init__(self, bytes): - """Takes valid Socket.IO event JSON. - """ - d = json.loads(bytes) - self.update(d) - self.name = d['name'] - self.args = d['args'] diff --git a/aspen/sockets/loop.py b/aspen/sockets/loop.py deleted file mode 100644 index a864a7473..000000000 --- a/aspen/sockets/loop.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -aspen.sockets.loop -~~~~~~~~~~~~~~~~~~ -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import threading - - -class Die: - pass - - -class ThreadedLoop(threading.Thread): - """Model a loop using a thread. - - Our architecture here is one thread per persistent socket. Depending on the - transport we probably have another thread already occupied with the HTTP - side of the request, from the CherryPy/Rocket threadpool. Assuming the - thread pool is larger than our concurrent user base, we have two threads - per persistent connection, in addition to the thread burden of any - stateless HTTP traffic. 
- - """ - - def __init__(self, socket): - """Takes a socket object. - """ - threading.Thread.__init__(self) - self.socket = socket - self.please_stop = threading.Event() - self.daemon = True - - def run(self): - while not self.please_stop.is_set(): - self.socket.tick() - - def start(self): - threading.Thread.start(self) - - def stop(self): - """Stop the socket loop thread. - - We signal to the thread loop to exit as soon as the next blocking - operation is complete, and then we attempt to unblock one of those - possible blocking operations: reading the incoming buffer. - - """ - # stop running tick as soon as possible - self.please_stop.set() - - # unblock reads from incoming - self.socket.incoming.put(Die) - - # wait for magic to work - self.join() - diff --git a/aspen/sockets/message.py b/aspen/sockets/message.py deleted file mode 100644 index 279ccea7c..000000000 --- a/aspen/sockets/message.py +++ /dev/null @@ -1,101 +0,0 @@ -""" -aspen.sockets.message -~~~~~~~~~~~~~~~~~~~~~ -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -from aspen import json, Response - - -RESERVED_EVENTS = [ 'message' - , 'connect' - , 'disconnect' - , 'open' - , 'close' - , 'error' - , 'retry' - , 'reconnect' - ] - - -class Message(object): - """Model a Socket.IO message. - """ - - def __init__(self, type_=0, id='', endpoint='', data=''): - self.type = type_ - self.id = id - self.endpoint = endpoint - self.data = data - - @classmethod - def from_bytes(cls, bytes): - parts = bytes.split(b':', 3) - if len(parts) == 3: - parts.append(b'') # data part is optional - if len(parts) != 4: # "::".split(":", 3) == ['', '', ''] - raise SyntaxError("This message has too few colons: %s." % bytes) - return cls(*parts) - - def __repr__(self): - return "" % self - - def __str__(self): - data = self.data - if self.type in (4, 5): - data = json.dumps(data) - return ":".join([ str(self.type) - , self.id - , self.endpoint - , data - ]) - - def __cmp__(self, other): - return cmp(str(self), str(other)) - - # type - # ==== - - __type = 0 - - def _get_type(self): - return self.__type - - def _set_type(self, type_): - try: - type_ = int(type_) - assert type_ in list(range(9)) - except (ValueError, AssertionError), exc: - raise ValueError("The message type is not in 0..8: %s." % type_) - self.__type = type_ - - type = property(_get_type, _set_type) - - - # data - # ==== - - __data = '' - - def _get_data(self): - return self.__data - - def _set_data(self, data): - if self.type == 4: # json - data = json.loads(data) - elif self.type == 5: # event - data = json.loads(data) - if 'name' not in data: - raise ValueError("An event message must have a 'name' key.") - if 'args' not in data: - raise ValueError("An event message must have an 'args' key.") - if data['name'] in RESERVED_EVENTS: - msg = "That event name is reserved: %s." % data['name'] - raise ValueError(msg) - self.__data = data - - data = property(_get_data, _set_data) - diff --git a/aspen/sockets/packet.py b/aspen/sockets/packet.py deleted file mode 100644 index 7ea5de125..000000000 --- a/aspen/sockets/packet.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -aspen.sockets.packet -~~~~~~~~~~~~~~~~~~~~ - -Packets. - -Socket.IO packets contain one or more frames of this format: - - \ufffdlength\ufffdencoded-message - -Alternately, a packet can contain a single encoded-message, without framing. 
- -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -from aspen.sockets import FFFD -from aspen.sockets.message import Message - - -class Packet(object): - """Model a Socket.IO packet. It takes bytes and yields Messages. - """ - - def __init__(self, bytes): - self.bytes = bytes - - def __iter__(self): - """Yield Message objects. - """ - if self.bytes[:3] != FFFD: - yield Message.from_bytes(self.bytes) - else: - frames = self.bytes.split(FFFD) - frames = frames[1:] # discard initial empty string - nframes = len(frames) - if nframes % 2 != 0: - msg = b"There are an odd number of frames in this packet: " - msg += self.bytes - raise SyntaxError(msg) - while frames: - # frames == [nbytes, bytes, nbytes, bytes, ...] - # We only care about bytes. - yield Message.from_bytes(frames[1]) - frames = frames[2:] - - -def frame(bytes): - bytes = str(bytes) - return b"%s%d%s%s" % (FFFD, len(bytes), FFFD, bytes) diff --git a/aspen/sockets/socket.py b/aspen/sockets/socket.py deleted file mode 100644 index f2f413840..000000000 --- a/aspen/sockets/socket.py +++ /dev/null @@ -1,177 +0,0 @@ -""" -aspen.sockets.socket -~~~~~~~~~~~~~~~~~~~~ -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import uuid - -from aspen import json, resources, Response -from aspen.sockets import HEARTBEAT, TIMEOUT, TRANSPORTS -from aspen.sockets.message import Message -from aspen.sockets.packet import Packet - - -class Socket(object): - """Model a persistent Socket.IO socket (regardless of transport). - - Socket objects sit between Aspen's HTTP machinery and your Resource. They - function as middleware, and the recv/send and _recv/_send semantics reflect - this. They (the sockets) are persistent. - - """ - - transports = ",".join(TRANSPORTS) - heartbeat = str(HEARTBEAT) - timeout = str(TIMEOUT) - - - def __init__(self, request, channel): - """Takes the handshake request and the socket's channel. - """ - self.sid = uuid.uuid4().hex - self.endpoint = request.line.uri.path.decoded - self.resource = resources.get(request) - - self.website = request.website - self.loop = request.website.network_engine.Loop(self) - self.incoming = request.website.network_engine.Buffer('incoming', self) - self.outgoing = request.website.network_engine.Buffer('outgoing', self) - self.channel = channel - self.channel.add(self) - self.context = self.resource.exec_second(self, request) - - def shake_hands(self): - """Return a handshake response. - """ - handshake = ":".join([ self.sid - , self.heartbeat - , self.timeout - , self.transports - ]) - return Response(200, handshake) - - def tick(self): - """Exec the third page of the resource. - - It is expected that socket resources will block via self.recv() or some - other mechanism, like reading a remote TCP socket. - - """ - exec self.resource.pages[2] in self.context - - def disconnect(self): - self.loop.stop() - if len(self.resource.pages) > 3: - exec self.resource.pages[3] in self.context - self.channel.remove(self) - - - # Client Side - # =========== - # Call these inside of your Resource. - - def sleep(self, seconds): - """Block until seconds have elapsed. - """ - self.engine.sleep(seconds) - - def recv(self): - """Block until the next message is available, then return it. - """ - return self.incoming.next() - - - def send(self, data): - """Buffer a plain message to be sent to the client. 
- """ - self.__send(3, data) - - def send_utf8(self): - """Buffer a UTF-8 message to be sent to the client. - """ - self.__send(3, data.encode('utf8')) - - def send_json(self, data): - """Buffer a JSON message to be sent to the client. - """ - if not isinstance(data, basestring): - data = json.dumps(data) - self.__send(4, data) - - def send_event(self, data): - """Buffer an event message to be sent to the client. - """ - if not isinstance(data, basestring): - data = json.dumps(data) - self.__send(5, data) - - def __send(self, type_, data): - message = Message() - message.type = type_ - message.endpoint = self.endpoint - message.data = data - self.outgoing.put(message) - - - # Event API - # ========= - # Working with events is so common that we offer these conveniences. - - def listen(self, *filter): - """Given a series of events to listen for, return a tuple. - - The return value is a tuple of the event name and data. If no events - are specified, the first event is returned. - - """ - while 1: - msg = self.incoming.next() - if not filter or msg['name'] in filter: - break - return (msg['name'], msg['args']) - - def notify(self, name, *args): - """This is a convenience function for event notification. - - The first argument is the name of the event, and subsequent arguments - show up as the arguments to the callback function in your event - listener on the client side. - - """ - self.send_event({"name": name, "args": args}) - - - # Server Side - # =========== - # These are called by Aspen's HTTP machinery. - - def _recv(self): - """Return an iterator of bytes or None. Don't block. - """ - return self.outgoing.flush() - - def _send(self, bytes): - """Given a packet bytestring, process messages. - """ - packet = Packet(bytes) - for message in packet: - # https://github.com/learnboost/socket.io-spec - if message.endpoint != self.endpoint: - msg = "The %s endpoint got a message intended for %s." - msg %= self.endpoint, message.endpoint - raise RuntimeError(msg) - if message.type == 0: # disconnect - self.disconnect() - elif message.type == 1: # connect - pass - elif message.type == 2: # heartbeat - pass - elif message.type in (3, 4, 5): # data message - self.incoming.put(message.data) - self.channel.incoming.put(message.data) - elif message.type in (6, 7, 8): # blah, blah, blah - pass diff --git a/aspen/sockets/transport.py b/aspen/sockets/transport.py deleted file mode 100644 index 07f27c580..000000000 --- a/aspen/sockets/transport.py +++ /dev/null @@ -1,59 +0,0 @@ -""" -aspen.sockets.transport -~~~~~~~~~~~~~~~~~~~~~~~ -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import time - -from aspen import Response -from aspen.sockets import TIMEOUT - - -class Transport(object): - """A transport converts HTTP messages into Socket messages. - """ - - def __init__(self, socket): - """Takes a Socket instance. - """ - self.socket = socket - - -class XHRPollingTransport(Transport): - - state = 0 - timeout = TIMEOUT * 0.90 # Allow for some wiggle-room to prevent XHRs - # from cancelling too often. - - def respond(self, request): - """Given a Request, return a Response. - """ - request.allow('GET', 'POST') - - if self.state == 0: # The client wants confirmation. - response = Response(200, "1:::") - self.state = 1 - - elif request.line.method == 'POST': # The client is sending us data. 
- self.socket._send(request.body.raw) - response = Response(200) - - elif request.line.method == 'GET': # The client is asking for data. - bytes_iter = iter([""]) - timeout = time.time() + self.timeout - while time.time() < timeout: - _bytes_iter = self.socket._recv() - if _bytes_iter is not None: - bytes_iter = _bytes_iter - break - request.website.network_engine.sleep(0.010) - response = Response(200, bytes_iter) - - return response - - def disconnect(self): - pass diff --git a/aspen/testing/client.py b/aspen/testing/client.py index b5e933fcb..74d020576 100644 --- a/aspen/testing/client.py +++ b/aspen/testing/client.py @@ -11,9 +11,8 @@ from StringIO import StringIO from aspen import Response -from aspen.server import Server from aspen.utils import typecheck - +from aspen.website import Website BOUNDARY = b'BoUnDaRyStRiNg' MULTIPART_CONTENT = b'multipart/form-data; boundary=%s' % BOUNDARY @@ -64,7 +63,7 @@ def hydrate_website(self, argv=None): argv = [ '--www_root', self.www_root , '--project_root', self.project_root ] + ([] if argv is None else argv) - self._website = Server(argv).get_website() + self._website = Website(argv) return self._website website = property(hydrate_website) diff --git a/aspen/testing/harness.py b/aspen/testing/harness.py index a151a2489..737183917 100644 --- a/aspen/testing/harness.py +++ b/aspen/testing/harness.py @@ -11,12 +11,7 @@ import sys from collections import namedtuple -from aspen import resources, sockets -from aspen.http.request import Request -from aspen.network_engines import ThreadedBuffer -from aspen.sockets.channel import Channel -from aspen.sockets.socket import Socket -from aspen.sockets.transport import XHRPollingTransport +from aspen import resources from aspen.testing.client import Client from filesystem_tree import FilesystemTree @@ -31,17 +26,12 @@ def teardown(): - remove FSFIX = %{tempdir}/fsfix - reset Aspen's global state - clear out sys.path_importer_cache - - clear out execution.extras """ os.chdir(CWD) # Reset some process-global caches. Hrm ... 
resources.__cache__ = {} - sockets.__sockets__ = {} - sockets.__channels__ = {} sys.path_importer_cache = {} # see test_weird.py - import aspen.execution - aspen.execution.clear_changes() teardown() # start clean @@ -91,43 +81,3 @@ def make_request(self, *a, **kw): kw['return_after'] = 'dispatch_request_to_filesystem' kw['want'] = 'request' return self.simple(*a, **kw) - - - # Sockets - # ======= - - def make_transport(self, content='', state=0): - self.fs.www.mk(('echo.sock.spt', content)) - socket = self.make_socket() - transport = XHRPollingTransport(socket) - transport.timeout = 0.05 # for testing, could screw up the test - if state == 1: - transport.respond(Request(uri='/echo.sock')) - return transport - - def make_socket_request(self, filename='echo.sock.spt'): - request = Request(uri='/echo.sock') - request.website = self.client.website - request.fs = self.fs.www.resolve(filename) - return request - - def make_socket(self, filename='echo.sock.spt', channel=None): - request = self.make_socket_request(filename='echo.sock.spt') - if channel is None: - channel = Channel(request.line.uri.path.raw, ThreadedBuffer) - socket = Socket(request, channel) - return socket - - def SocketInThread(harness): - - class _SocketInThread(object): - - def __enter__(self, filename='echo.sock.spt'): - self.socket = harness.make_socket(filename) - self.socket.loop.start() - return self.socket - - def __exit__(self, *a): - self.socket.loop.stop() - - return _SocketInThread() diff --git a/build.py b/build.py index c53af628a..2371fc7a2 100644 --- a/build.py +++ b/build.py @@ -59,9 +59,10 @@ def _env(): def aspen(): - if os.path.exists(_virt('aspen')): - return _env() + v = shell(_virt('python'), '-c', 'import aspen; print("found")', ignore_status=True) + if "found" in v: + return for dep in ASPEN_DEPS: run(_virt('pip'), 'install', '--no-index', '--find-links=' + INSTALL_DIR, dep) @@ -102,7 +103,7 @@ def docs(): aspen() run(_virt('pip'), 'install', 'aspen-tornado') run(_virt('pip'), 'install', 'pygments') - shell(_virt('aspen'), '-a:5370', '-wdoc', '-pdoc/.aspen', + shell(_virt('python'), '-m', 'aspen', '-a:5370', '-wdoc', '-pdoc/.aspen', '--changes_reload=1', silent=False) @@ -110,7 +111,7 @@ def smoke(): aspen() run('mkdir', smoke_dir) open(os.path.join(smoke_dir, "index.html"), "w").write("Greetings, program!") - run(_virt('aspen'), '-w', smoke_dir) + run(_virt('python'), '-m', 'aspen', '-w', smoke_dir) def clean_smoke(): diff --git a/doc/.aspen/configure-aspen.py b/doc/.aspen/configure-aspen.py index 50b6e2c37..fb292fb54 100644 --- a/doc/.aspen/configure-aspen.py +++ b/doc/.aspen/configure-aspen.py @@ -3,6 +3,7 @@ from aspen.configuration import parse from aspen_io import opts, add_stuff_to_request_context +os.chdir(website.www_root) opts['show_ga'] = parse.yes_no(os.environ.get( 'ASPEN_IO_SHOW_GA' , 'no' diff --git a/doc/api/website/index.html.spt b/doc/api/website/index.html.spt index 79ff50c3d..a842e0c2f 100644 --- a/doc/api/website/index.html.spt +++ b/doc/api/website/index.html.spt @@ -27,8 +27,6 @@ line (--name):

 logging_threshold      0 (most verbose)
 media_type_default     text/plain
 media_type_json        application/json
-network_engine         cheroot
-network_address        ((u'0.0.0.0', 8080), socket.AF_INET)
 project_root           None
 renderer_default       stdlib_percent
 show_tracebacks        False
@@ -63,15 +61,6 @@ configuration is processed, and before your
 renderer_factories and default_renderers_by_media_type.
-
  • The network_engine attribute is hydrated into an Engine object. - If you haven’t installed the necessary underlying library - you’ll get an ImportError to this effect.
  • - -
  • The network_address attribute is broken out into - network_address, network_sockfam, and network_port attributes. These are - used by the aspen.server machinery and the Engines to connect - to the network. -
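The attributes in the table above can also be assigned from a project configuration script, which runs with the website object in scope (the configure-aspen.py change above relies on exactly that). A rough sketch; the particular values are illustrative only:

    # configure-aspen.py -- rough sketch. Aspen execs this script with a
    # `website` object in scope; the values below are illustrative, not
    # recommendations.
    website.renderer_default = 'stdlib_format'
    website.show_tracebacks  = True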

    After your configuration scripts are run, Aspen looks at the value for diff --git a/doc/aspen/index.html.spt b/doc/aspen/index.html.spt deleted file mode 100644 index 8dcb036d5..000000000 --- a/doc/aspen/index.html.spt +++ /dev/null @@ -1,86 +0,0 @@ -doc_title="aspen" -doc_next = '' -[----------------------------------------] -{% extends doc.html %} -{% block doc %} - - - -

    You can serve Aspen websites with any WSGI - server, and Aspen also bundles a production-quality server of its own. -It’s called ... aspen!

    - -
    Usage: aspen [options]
    -
    -Aspen is a Python web framework. By default this program will start serving a
    -website from the current directory on port 8080. Options are as follows. See
    -also http://aspen.io/.
    -
    -Options:
    -  --version             show program's version number and exit
    -  -h, --help            show this help message and exit
    -
    -  Basic Options:
    -    -f CONFIGURATION_SCRIPTS, --configuration_scripts=CONFIGURATION_SCRIPTS
    -                        comma-separated list of paths to configuration files
    -                        in Python syntax to exec in addition to
    -                        $ASPEN_PROJECT_ROOT/configure-aspen.py
    -    -a NETWORK_ADDRESS, --network_address=NETWORK_ADDRESS
    -                        the IPv4, IPv6, or Unix address to bind to
    -                        [0.0.0.0:8080]
    -    -e NETWORK_ENGINE, --network_engine=NETWORK_ENGINE
    -                        the HTTP engine to use, one of {cheroot,cherrypy,
    -                        diesel,eventlet,gevent,pants,rocket,tornado,twisted}
    -                        [cheroot]
    -    -l LOGGING_THRESHOLD, --logging_threshold=LOGGING_THRESHOLD
    -                        a small integer; 1 will suppress most of aspen's
    -                        internal logging, 2 will suppress all it [0]
    -    -p PROJECT_ROOT, --project_root=PROJECT_ROOT
    -                        the filesystem path of the directory in which to look
    -                        for project files like template bases and such.[]
    -    -w WWW_ROOT, --www_root=WWW_ROOT
    -                        the filesystem path of the document publishing root
    -                        [.]
    -
    -  Extended Options:
    -    I judge these variables to be less-often configured from the command
    -    line. But who knows?
    -
    -    --changes_reload=CHANGES_RELOAD
    -                        if set to yes/true/1, changes to configuration files
    -                        and Python modules will cause aspen to re-exec, and
    -                        template bases won't be cached [no]
    -    --charset_dynamic=CHARSET_DYNAMIC
    -                        this is set as the charset for rendered and negotiated
    -                        resources of Content-Type text/* [UTF-8]
    -    --charset_static=CHARSET_STATIC
    -                        if set, this will be sent as the charset for static
    -                        resources of Content-Type text/*; if you want to punt
    -                        and let browsers guess, then just leave this unset []
    -    --indices=INDICES   a comma-separated list of filenames to look for when a
    -                        directory is requested directly; prefix with + to
    -                        extend previous configuration instead of overriding
    -                        [index.html, index.json, index.html.spt, 
    -                        index.json.spt]
    -    --list_directories=LIST_DIRECTORIES
    -                        if set to {yes,true,1}, aspen will serve a directory
    -                        listing when no index is available [no]
    -    --media_type_default=MEDIA_TYPE_DEFAULT
    -                        this is set as the Content-Type for resources of
    -                        otherwise unknown media type [text/plain]
    -    --media_type_json=MEDIA_TYPE_JSON
    -                        this is set as the Content-Type of JSON resources
    -                        [application/json]
    -    --renderer_default=RENDERER_DEFAULT
    -                        the renderer to use by default; one of
    -                        {stdlib_percent,stdlib_format,stdlib_template,jinja2,
    -                         pystache,tornado} [stdlib_percent]
    -    --show_tracebacks=SHOW_TRACEBACKS
    -                        if set to {yes,true,1}, 500s will have a traceback in
    -                        the browser [no]
    -
    -
    -{% end %} diff --git a/doc/configuration/index.html.spt b/doc/configuration/index.html.spt index 102d71bcb..0493b0000 100644 --- a/doc/configuration/index.html.spt +++ b/doc/configuration/index.html.spt @@ -20,8 +20,6 @@ line. For each of the following there is an environment variable like logging_threshold 1 media_type_default text/plain media_type_json application/json - network_engine gevent - network_address :5370 project_root /usr/local/mysite show_tracebacks True www_root /usr/local/mysite/www diff --git a/doc/deployment/index.html.spt b/doc/deployment/index.html.spt index 947d38758..7dde43b2b 100644 --- a/doc/deployment/index.html.spt +++ b/doc/deployment/index.html.spt @@ -11,18 +11,6 @@ you need more control, you can instantiate that class yourself. In any case, here are some ways to get it on the network.

    -

    Aspen

    - -

    Aspen bundles its own server, called aspen. It is suitable for production.

    - -
    -$ cd /path/to/my/website
    -$ aspen
    -Greetings, program! Welcome to port 8080.
    -
    - -
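With the bundled server gone, getting Aspen on the network means handing a Website instance to whatever WSGI server you prefer. A hypothetical entry-point module, with illustrative paths:

    # wsgi.py -- hypothetical entry point for an external WSGI server.
    from aspen.website import Website

    application = Website(['--www_root', '/usr/local/mysite/www'])

    # e.g. with Gunicorn (see below):
    #   $ gunicorn wsgi:application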

    Gunicorn

    Gunicorn is a preforking server ported diff --git a/doc/index.html.spt b/doc/index.html.spt index 0a4bac6af..cd981d755 100644 --- a/doc/index.html.spt +++ b/doc/index.html.spt @@ -385,15 +385,6 @@ homepage = True for your Mac (screenshot)

  • -
  • Pictionary demo - of Socket.IO integration
  • - -
  • A couple - demos of Aspen’s Socket.IO integration
  • - -
  • Aspen + - diesel talking to a STOMP server
  • -
  • A toy blog, demonstrating Aspen’s core features
  • @@ -446,11 +437,6 @@ homepage = True
  • project_root
  • -
  • Executable - -
  • diff --git a/doc/quick-start/index.html.spt b/doc/quick-start/index.html.spt index 471d01986..a8d2f0e7e 100644 --- a/doc/quick-start/index.html.spt +++ b/doc/quick-start/index.html.spt @@ -28,13 +28,16 @@ blah
    (foo)$ mkdir www
     (foo)$ cd www
    -

    Step 4: Create a web page, and start aspen inside it:

    +

    Step 4: Create a web page:

    -
    (foo)$ echo Greetings, program! > index.html.spt
    -(foo)$ aspen
    -Greetings, program! Welcome to port 8080.
    +
    (foo)$ echo Greetings, program! > index.html.spt
    -

    Step 5: Check localhost for +

    Step 5: Start a dev server:

    + +
    (foo)$ python -m aspen
    +[...] Greetings, program! Welcome to port 8080.
    + +

    Step 6: Check localhost for your new page!

    {{ screenshot("greetings-program") }} diff --git a/docs/index.rst b/docs/index.rst index 32eb0c682..ba38607d2 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -15,11 +15,6 @@ Library Reference :member-order: bysource :special-members: -.. automodule:: aspen.algorithms.server - :members: - :member-order: bysource - :special-members: - .. automodule:: aspen.algorithms.website :members: :member-order: bysource @@ -125,21 +120,6 @@ Library Reference :member-order: bysource :special-members: -.. automodule:: aspen.network_engines - :members: - :member-order: bysource - :special-members: - -.. automodule:: aspen.network_engines.cheroot_ - :members: - :member-order: bysource - :special-members: - -.. automodule:: aspen.network_engines.gevent_ - :members: - :member-order: bysource - :special-members: - .. automodule:: aspen.renderers :members: :member-order: bysource @@ -195,11 +175,6 @@ Library Reference :member-order: bysource :special-members: -.. automodule:: aspen.resources.socket_resource - :members: - :member-order: bysource - :special-members: - .. automodule:: aspen.resources.static_resource :members: :member-order: bysource @@ -210,51 +185,6 @@ Library Reference :member-order: bysource :special-members: -.. automodule:: aspen.sockets - :members: - :member-order: bysource - :special-members: - -.. automodule:: aspen.sockets.buffer - :members: - :member-order: bysource - :special-members: - -.. automodule:: aspen.sockets.channel - :members: - :member-order: bysource - :special-members: - -.. automodule:: aspen.sockets.event - :members: - :member-order: bysource - :special-members: - -.. automodule:: aspen.sockets.loop - :members: - :member-order: bysource - :special-members: - -.. automodule:: aspen.sockets.message - :members: - :member-order: bysource - :special-members: - -.. automodule:: aspen.sockets.packet - :members: - :member-order: bysource - :special-members: - -.. automodule:: aspen.sockets.socket - :members: - :member-order: bysource - :special-members: - -.. automodule:: aspen.sockets.transport - :members: - :member-order: bysource - :special-members: - .. automodule:: aspen.testing :members: :member-order: bysource diff --git a/setup.py b/setup.py index 194959d13..056c3fcc4 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ , 'Programming Language :: Python :: 2.7' , 'Programming Language :: Python :: Implementation :: CPython' , 'Programming Language :: Python :: Implementation :: Jython' - , 'Topic :: Internet :: WWW/HTTP :: HTTP Servers' + , 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application' ] setup( author = 'Chad Whitacre' @@ -29,10 +29,7 @@ , classifiers = classifiers , description = ('Aspen is a Python web framework. 
' 'Simplates are the main attraction.') - , entry_points = {'console_scripts': [ 'aspen = aspen.server:main' - , 'thrash = thrash:main' - , 'fcgi_aspen = fcgi_aspen:main [fcgi]' - ]} + , entry_points = {'console_scripts': ['fcgi_aspen = fcgi_aspen:main [fcgi]']} , name = 'aspen' , packages = find_packages(exclude=['aspen.tests', 'aspen.tests.*']) , py_modules = ['thrash', 'fcgi_aspen'] @@ -40,8 +37,7 @@ , version = version , zip_safe = False , package_data = {'aspen': ['www/*', 'configuration/mime.types']} - , install_requires = [ 'Cheroot==4.0.0beta' - , 'mimeparse==0.1.3' + , install_requires = [ 'mimeparse==0.1.3' , 'first==2.0.1' , 'algorithm>=1.0.0' , 'filesystem_tree>=1.0.0' diff --git a/tests/test_configuration.py b/tests/test_configuration.py index 8a5eaee52..145cb6570 100644 --- a/tests/test_configuration.py +++ b/tests/test_configuration.py @@ -5,11 +5,9 @@ import os import sys -import socket from pytest import raises, mark -import aspen from aspen.configuration import Configurable, ConfigurationError, parse from aspen.configuration.options import OptionParser, DEFAULT from aspen.website import Website @@ -19,8 +17,6 @@ def test_everything_defaults_to_empty_string(): o = OptionParser() opts, args = o.parse_args([]) actual = ( opts.configuration_scripts - , opts.network_address - , opts.network_engine , opts.logging_threshold , opts.project_root , opts.www_root @@ -34,8 +30,8 @@ def test_everything_defaults_to_empty_string(): , opts.renderer_default , opts.show_tracebacks ) - expected = ( DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT - , DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT + expected = ( DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT + , DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT ) assert actual == expected @@ -111,15 +107,10 @@ def test_ConfigurationError_NOT_raised_if_no_cwd_but_do_have__www_root(harness): def test_configurable_sees_root_option(harness): c = Configurable() c.configure(['--www_root', harness.fs.project.resolve('')]) - expected = os.getcwd() + expected = harness.fs.project.root actual = c.www_root assert actual == expected -def test_address_can_be_localhost(): - expected = (('127.0.0.1', 8000), 2) - actual = parse.network_address(u'localhost:8000') - assert actual == expected - def test_configuration_scripts_works_at_all(): o = OptionParser() opts, args = o.parse_args(['--configuration_scripts', "foo"]) @@ -217,46 +208,3 @@ def test_parse_renderer_good(): def test_parse_renderer_bad(): raises(ValueError, parse.renderer, u'floober') - - -def test_parse_network_engine_good(): - actual = parse.network_engine(u'cheroot') - assert actual == 'cheroot' - -def test_parse_network_engine_bad(): - raises(ValueError, parse.network_engine, u'floober') - -@mark.xfail(sys.platform == 'win32', - reason="Unix Socket (AF_UNIX) unavailable on Windows") -def test_parse_network_address_unix_socket(): - actual = parse.network_address(u"/foo/bar") - assert actual == ("/foo/bar", socket.AF_UNIX) - -def test_parse_network_address_notices_ipv6(): - actual = parse.network_address(u"2607:f0d0:1002:51::4") - assert actual == (u"2607:f0d0:1002:51::4", socket.AF_INET6) - -def test_parse_network_address_sees_one_colon_as_ipv4(): - actual = parse.network_address(u"192.168.1.1:8080") - assert actual == ((u"192.168.1.1", 8080), socket.AF_INET) - -def test_parse_network_address_need_colon_for_ipv4(): - raises(ValueError, parse.network_address, u"192.168.1.1 8080") - -def test_parse_network_address_defaults_to_inaddr_any(): - actual = 
parse.network_address(u':8080') - assert actual == ((u'0.0.0.0', 8080), socket.AF_INET) - -def test_parse_network_address_with_bad_address(): - raises(ValueError, parse.network_address, u'0 0 0 0:8080') - -def test_parse_network_address_with_bad_port(): - raises(ValueError, parse.network_address, u':80 0') - -def test_parse_network_address_with_port_too_low(): - actual = raises(ValueError, parse.network_address, u':-1').value.args[0] - assert actual == "invalid port (out of range)" - -def test_parse_network_address_with_port_too_high(): - actual = raises(ValueError, parse.network_address, u':65536').value.args[0] - assert actual == "invalid port (out of range)" diff --git a/tests/test_dispatcher.py b/tests/test_dispatcher.py index 795de81e5..ec051af50 100644 --- a/tests/test_dispatcher.py +++ b/tests/test_dispatcher.py @@ -399,26 +399,6 @@ def test_virtual_path_docs_6(harness): assert_body(harness, '/1999/', "Tonight we're going to party like it's 1999!") -# intercept_socket -# ================ - -def test_intercept_socket_protects_direct_access(): - request = Request(uri="/foo.sock") - raises(Response, dispatcher.dispatch, request) - -def test_intercept_socket_intercepts_handshake(): - request = Request(uri="/foo.sock/1") - actual = dispatcher.extract_socket_info(request.line.uri.path.decoded) - expected = ('/foo.sock', '1') - assert actual == expected - -def test_intercept_socket_intercepts_transported(): - request = Request(uri="/foo.sock/1/websocket/46327hfjew3?foo=bar") - actual = dispatcher.extract_socket_info(request.line.uri.path.decoded) - expected = ('/foo.sock', '1/websocket/46327hfjew3') - assert actual == expected - - # mongs # ===== # These surfaced when porting mongs from Aspen 0.8. diff --git a/tests/test_execution.py b/tests/test_execution.py deleted file mode 100644 index 0ccd2e21b..000000000 --- a/tests/test_execution.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -from aspen import execution - -class Foo: - pass - -def test_startup_basically_works(): - website = Foo() - website.changes_kill = True - website.root = 'foo' - website.network_engine = Foo() - website.network_engine.start_checking = lambda x: x - website.configuration_scripts = [] - execution.install(website) - expected = set() - actual = execution.extras - assert actual == expected, repr(actual) + " instead of " + repr(expected) - - - diff --git a/tests/test_sockets_.py b/tests/test_sockets_.py deleted file mode 100644 index eb23eeccb..000000000 --- a/tests/test_sockets_.py +++ /dev/null @@ -1,51 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -from aspen import sockets -from aspen.http.request import Request - - -def test_sockets_get_nonsock_returns_None(): - request = Request() - request.socket = None - expected = None - actual = sockets.get(request) - assert actual is expected - -def test_sockets_get_adds_channel(harness): - harness.fs.www.mk(('echo.sock.spt', '[---]\n')) - request = harness.make_socket_request() - request.socket = '1/' - - try: - sockets.get(request) # handshake - - expected = '/echo.sock' - actual = sockets.__channels__['/echo.sock'].name - assert actual == expected - finally: - sockets.__channels__['/echo.sock'].disconnect_all() - -def test_channel_survives_transportation(harness): - harness.fs.www.mk(('echo.sock.spt', '[---]\n')) - 
request = harness.make_socket_request() - request.socket = '1/' - response = sockets.get(request) # handshake - sid = response.body.split(':')[0] - request.socket = '1/xhr-polling/' + sid - transport = sockets.get(request) # transport - - try: - expected = '/echo.sock' - actual = sockets.__channels__['/echo.sock'].name - assert actual == expected - - expected = transport.socket.channel - actual = sockets.__channels__['/echo.sock'] - assert actual is expected - finally: - transport.socket.disconnect() - - diff --git a/tests/test_sockets_buffer.py b/tests/test_sockets_buffer.py deleted file mode 100644 index 248c9254d..000000000 --- a/tests/test_sockets_buffer.py +++ /dev/null @@ -1,34 +0,0 @@ -from aspen.sockets import FFFD -from aspen.sockets.buffer import ThreadedBuffer as Buffer -from aspen.sockets.message import Message - - -def test_buffer_is_instantiable(harness): - harness.fs.www.mk(('echo.sock.spt', 'socket.send(socket.recv())')) - expected = Buffer - actual = Buffer(harness.make_socket(), 'foo').__class__ - assert actual is expected - -def test_can_put_onto_buffer(harness): - harness.fs.www.mk(('echo.sock.spt', 'socket.send(socket.recv())')) - buffer = Buffer(harness.make_socket(), 'foo') - expected = [FFFD+'4'+FFFD+'1:::'] - buffer.put(Message.from_bytes('1:::')) - actual = list(buffer.flush()) - assert actual == expected - -def test_buffer_flush_performance(): - - return # This test makes my lap hot. - - M = lambda: Message.from_bytes("3::/echo.sock:Greetings, program!") - N = 10000 - buffer = Buffer([M() for i in range(N)]) - out = list(buffer.flush()) - nbuffer = len(buffer) - nout = len(out) - assert nbuffer + nout == N - assert nout > 500 - - - diff --git a/tests/test_sockets_channel.py b/tests/test_sockets_channel.py deleted file mode 100644 index b0d7f38e9..000000000 --- a/tests/test_sockets_channel.py +++ /dev/null @@ -1,52 +0,0 @@ -from collections import deque - -from pytest import raises - -from aspen.sockets.buffer import ThreadedBuffer -from aspen.sockets.channel import Channel -from aspen.sockets.message import Message - - -def test_channel_is_instantiable(): - expected = Channel - actual = Channel('/foo.sock', ThreadedBuffer).__class__ - assert actual is expected - -def test_channel_can_have_sockets_added_to_it(harness): - harness.fs.www.mk(('echo.sock.spt', 'channel.send(channel.recv())')) - socket = harness.make_socket() - channel = Channel('foo', ThreadedBuffer) - channel.add(socket) - - expected = [socket] - actual = list(channel) - assert actual == expected - -def test_channel_raises_AssertionError_on_double_add(harness): - harness.fs.www.mk(('echo.sock.spt', '')) - socket = harness.make_socket() - channel = Channel('foo', ThreadedBuffer) - channel.add(socket) - raises(AssertionError, channel.add, socket) - -def test_channel_passes_send_on_to_one_socket(harness): - harness.fs.www.mk(('echo.sock.spt', '')) - socket = harness.make_socket() - channel = Channel('foo', ThreadedBuffer) - channel.add(socket) - channel.send('foo') - - expected = deque([Message.from_bytes('3::/echo.sock:foo')]) - actual = socket.outgoing.queue - assert actual == expected - -def test_channel_passes_send_on_to_four_sockets(harness): - harness.fs.www.mk(('echo.sock.spt', 'channel.send(channel.recv())')) - channel = Channel('foo', ThreadedBuffer) - sockets = [harness.make_socket(channel=channel) for i in range(4)] - channel.send('foo') - - for socket in sockets: - expected = deque([Message.from_bytes('3::/echo.sock:foo')]) - actual = socket.outgoing.queue - assert actual == expected diff 
--git a/tests/test_sockets_message.py b/tests/test_sockets_message.py deleted file mode 100644 index 12b836be5..000000000 --- a/tests/test_sockets_message.py +++ /dev/null @@ -1,135 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -from pytest import raises - -from aspen.sockets.message import Message - - -def test_message_can_be_instantiated_from_bytes(): - expected = Message - actual = Message.from_bytes('3:::').__class__ - assert actual is expected - -def test_from_bytes_too_few_colons_raises_SyntaxError(): - exc = raises(SyntaxError, Message.from_bytes, '3:').value - expected = "This message has too few colons: 3:." - actual = exc.args[0] - assert actual == expected - -def test_from_bytes_data_part_is_optional(): - message = Message.from_bytes('3::') - expected = "" - actual = message.data - assert actual == expected - -def test_from_bytes_too_many_colons_and_the_extras_end_up_in_the_data(): - message = Message.from_bytes('3::::') - expected = ":" - actual = message.data - assert actual == expected - -def test_from_bytes_non_digit_type_raises_ValueError(): - exc = raises(ValueError, Message.from_bytes, 'foo:::').value - expected = "The message type is not in 0..8: foo." - actual = exc.args[0] - assert actual == expected - -def test_from_bytes_type_too_small_raises_ValueError(): - exc = raises(ValueError, Message.from_bytes, '-1:::').value - expected = "The message type is not in 0..8: -1." - actual = exc.args[0] - assert actual == expected - -def test_from_bytes_type_too_big_raises_ValueError(): - exc = raises(ValueError, Message.from_bytes, '9:::').value - expected = "The message type is not in 0..8: 9." - actual = exc.args[0] - assert actual == expected - -def test_from_bytes_type_lower_bound_instantiable(): - message = Message.from_bytes('0:::') - expected = 0 - actual = message.type - assert actual == expected - -def test_from_bytes_type_upper_bound_instantiable(): - message = Message.from_bytes('8:::') - expected = 8 - actual = message.type - assert actual == expected - -def test_id_passes_through(): - message = Message.from_bytes('3:deadbeef::') - expected = 'deadbeef' - actual = message.id - assert actual == expected - -def test_endpoint_passes_through(): - message = Message.from_bytes('3:deadbeef:/cheese.sock:') - expected = '/cheese.sock' - actual = message.endpoint - assert actual == expected - -def test_data_passes_through(): - message = Message.from_bytes('3:deadbeef:/cheese.sock:Greetings, program!') - expected = 'Greetings, program!' - actual = message.data - assert actual == expected - -def test_json_data_decoded(): - message = Message.from_bytes('''4:deadbeef:/cheese.sock:{ - "foo": "bar" -}''') - expected = {"foo": "bar"} - actual = message.data - assert actual == expected - -def test_json_roundtrip(): - bytes = '''4:deadbeef:/cheese.sock:{ - "foo": "bar" -}''' - message = Message.from_bytes(bytes) - expected = bytes - actual = str(message) - assert actual == expected - -def test_event_data_decoded(): - message = Message.from_bytes('''5:::{ - "name": "bar", "args": [] -}''') - expected = {u'args': [], u'name': 'bar'} - actual = message.data - assert actual == expected - -def test_event_data_without_name_raises_ValueError(): - exc = raises( ValueError - , Message.from_bytes - , '5:::{"noom": "bar", "args": []}' - ).value - expected = "An event message must have a 'name' key." 
- actual = exc.args[0] - assert actual == expected - -def test_event_data_without_args_raises_ValueError(): - exc = raises( ValueError - , Message.from_bytes - , '5:::{"name": "bar", "arrrrgs": []}' - ).value - expected = "An event message must have an 'args' key." - actual = exc.args[0] - assert actual == expected - -def test_event_data_with_reserved_name_raises_ValueError(): - exc = raises( ValueError - , Message.from_bytes - , '5:::{"name": "connect", "args": []}' - ).value - expected = "That event name is reserved: connect." - actual = exc.args[0] - assert actual == expected - - - diff --git a/tests/test_sockets_packet.py b/tests/test_sockets_packet.py deleted file mode 100644 index f3baeaf4f..000000000 --- a/tests/test_sockets_packet.py +++ /dev/null @@ -1,41 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -from pytest import raises - -from aspen.sockets import FFFD -from aspen.sockets.packet import Packet -from aspen.sockets.message import Message - - -def test_packet_Packetable_with_unframed_bytes(): - expected = [Message.from_bytes(b'1:::')] - actual = list(Packet(b'1:::')) - assert actual == expected - -def test_packet_Packetable_with_framed_bytes(): - expected = [Message.from_bytes(b'1:::')] - actual = list(Packet(FFFD + b'4' + FFFD + b'1:::')) - assert actual == expected - -def test_packet_Packetable_with_multiple_frames(): - expected = [Message.from_bytes(x) for x in (b'0:::', b'1:::')] - actual = list(Packet(FFFD+b'4'+FFFD+b'0:::'+FFFD+b'4'+FFFD+b'1:::')) - assert actual == expected - -def test_packet_with_odd_frames_raises_SyntaxError(): - Packet_ = lambda s: list(Packet(s)) # raises chokes on generator - raises(SyntaxError, Packet_, FFFD+b'4'+FFFD+b'0:::'+FFFD) - -def test_packet_with_odd_frames_tells_you_that(): - Packet_ = lambda s: list(Packet(s)) # raises chokes on generator - packet = FFFD+b'4'+FFFD+b'0:::'+FFFD - exc = raises(SyntaxError, Packet_, packet).value - expected = b"There are an odd number of frames in this packet: %s" % packet - actual = exc.args[0] - assert actual == expected - - - diff --git a/tests/test_sockets_socket.py b/tests/test_sockets_socket.py deleted file mode 100644 index 0344f4e5a..000000000 --- a/tests/test_sockets_socket.py +++ /dev/null @@ -1,60 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import time - -from aspen.sockets import FFFD -from aspen.sockets.socket import Socket - - -def test_socket_is_instantiable(harness): - harness.fs.www.mk(('echo.sock.spt', '')) - - expected = Socket - actual = harness.make_socket().__class__ - assert actual is expected - -def test_two_sockets_are_instantiable(harness): - harness.fs.www.mk(('echo.sock.spt', '')) - - socket1 = harness.make_socket() - socket2 = harness.make_socket() - - expected = (Socket, Socket) - actual = (socket1.__class__, socket2.__class__) - assert actual == expected - -def test_socket_can_shake_hands(harness): - harness.fs.www.mk(('echo.sock.spt', '')) - socket = harness.make_socket() - response = socket.shake_hands() - expected = '15:10:xhr-polling' - actual = response.body.split(':', 1)[1] - assert actual == expected - -def test_socket_can_barely_function(harness): - harness.fs.www.mk(('echo.sock.spt', 'socket.send("Greetings, program!")')) - - socket = harness.make_socket() - socket.tick() - - expected = FFFD+b'33'+FFFD+b'3::/echo.sock:Greetings, 
program!' - actual = socket._recv() - if actual is not None: - actual = actual.next() - assert actual == expected - -def test_socket_can_echo(harness): - harness.fs.www.mk(('echo.sock.spt', 'socket.send(socket.recv())')) - - with harness.SocketInThread() as socket: - socket._send(b'3::/echo.sock:Greetings, program!') - time.sleep(0.05) # give the resource time to tick - - expected = FFFD+b'33'+FFFD+b'3::/echo.sock:Greetings, program!' - actual = socket._recv() - if actual is not None: - actual = actual.next() - assert actual == expected diff --git a/tests/test_sockets_transport.py b/tests/test_sockets_transport.py deleted file mode 100644 index da8731d97..000000000 --- a/tests/test_sockets_transport.py +++ /dev/null @@ -1,110 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import time -from collections import deque -from cStringIO import StringIO - -from aspen import Response -from aspen.http.request import Request, Headers -from aspen.sockets import FFFD -from aspen.sockets.transport import XHRPollingTransport -from aspen.sockets.message import Message - - -def test_transport_instantiable(harness): - transport = harness.make_transport() - - expected = XHRPollingTransport - actual = transport.__class__ - assert actual is expected - -def test_transport_can_minimally_respond(harness): - transport = harness.make_transport() - request = Request() - - expected = Response - actual = transport.respond(request).__class__ - assert actual is expected - -def test_transport_starts_in_state_0(harness): - transport = harness.make_transport() - - expected = 0 - actual = transport.state - assert actual == expected - -def test_transport_goes_to_state_1_after_first_request(harness): - transport = harness.make_transport() - request = Request() - transport.respond(request) - - expected = 1 - actual = transport.state - assert actual == expected - -def test_transport_stays_in_state_1_after_second_request(harness): - transport = harness.make_transport() - request = harness.make_socket_request() - transport.respond(request) - transport.respond(request) - - expected = 1 - actual = transport.state - assert actual == expected - -def test_transport_POST_gives_data_to_socket(harness): - transport = harness.make_transport(state=1) - - msg = b'3::/echo.sock:Greetings, program!' - request = Request( 'POST' - , '/echo.sock' - , body=StringIO(msg) - , headers={'content-length' : str(len(msg)), 'Host': 'Testhost' } - ) - transport.respond(request) - - expected = deque(['Greetings, program!']) - actual = transport.socket.incoming.queue - assert actual == expected - -def test_transport_GET_gets_data_from_socket(harness): - transport = harness.make_transport(state=1) - message = Message.from_bytes(b"3:::Greetings, program!") - transport.socket.outgoing.put(message) - - request = Request('GET') - response = transport.respond(request) - - expected = FFFD+b'23'+FFFD+b'3:::Greetings, program!' 
- actual = response.body.next() - assert actual == expected - -def test_transport_GET_blocks_for_empty_socket(harness): - transport = harness.make_transport(state=1) - - request = harness.make_socket_request() - start = time.time() - transport.respond(request) - end = time.time() - - expected = transport.timeout - actual = round(end - start, 4) - assert actual > expected - -def test_transport_handles_roundtrip(harness): - transport = harness.make_transport(state=1, content="socket.send(socket.recv())") - msg = b"3::/echo.sock:ping" - request = Request('POST', '/echo.sock', body=StringIO(msg), - headers={ 'content-length' : str(len(msg)), 'Host': 'Testhost' }) - transport.respond(request) - transport.socket.tick() # do it manually - - request = Request('GET', '/echo.sock') - response = transport.respond(request) - - expected = FFFD+b"18"+FFFD+b"3::/echo.sock:ping" - actual = response.body.next() - assert actual == expected