diff --git a/ci.sh b/ci.sh
index b4f56f7114..d4f9df3a94 100755
--- a/ci.sh
+++ b/ci.sh
@@ -72,14 +72,6 @@ python -m pip --version
 python setup.py sdist --formats=zip
 python -m pip install dist/*.zip

-if python -c 'import sys; sys.exit(sys.version_info >= (3, 7))'; then
-    # Python < 3.7, select last ipython with 3.6 support
-    # macOS requires the suffix for --in-place or you get an undefined label error
-    sed -i'.bak' 's/ipython==[^ ]*/ipython==7.16.1/' test-requirements.txt
-    sed -i'.bak' 's/traitlets==[^ ]*/traitlets==4.3.3/' test-requirements.txt
-    git diff test-requirements.txt
-fi
-
 if [ "$CHECK_FORMATTING" = "1" ]; then
     python -m pip install -r test-requirements.txt
     source check.sh
diff --git a/docs/source/reference-io.rst b/docs/source/reference-io.rst
index 4d33601f17..b18983e272 100644
--- a/docs/source/reference-io.rst
+++ b/docs/source/reference-io.rst
@@ -237,8 +237,7 @@ other constants and functions in the :mod:`ssl` module.

 .. warning:: Avoid instantiating :class:`ssl.SSLContext` directly. A
    newly constructed :class:`~ssl.SSLContext` has less secure
-   defaults than one returned by :func:`ssl.create_default_context`,
-   dramatically so before Python 3.6.
+   defaults than one returned by :func:`ssl.create_default_context`.

 Instead of using :meth:`ssl.SSLContext.wrap_socket`, you create a
 :class:`SSLStream`:
@@ -722,7 +721,7 @@ subprocess`` in order to access constants such as ``PIPE`` or

 Currently, Trio always uses unbuffered byte streams for communicating
 with a process, so it does not support the ``encoding``, ``errors``,
-``universal_newlines`` (alias ``text`` in 3.7+), and ``bufsize``
+``universal_newlines`` (alias ``text``), and ``bufsize``
 options.
diff --git a/setup.py b/setup.py
index 2364aac4d3..5c5890d8a5 100644
--- a/setup.py
+++ b/setup.py
@@ -44,7 +44,7 @@ Vital statistics:

 * Supported environments: Linux, macOS, or Windows running some kind of Python
-  3.6-or-better (either CPython or PyPy3 is fine). \\*BSD and illumos likely
+  3.7-or-better (either CPython or PyPy3 is fine). \\*BSD and illumos likely
   work too, but are not tested.

 * Install: ``python3 -m pip install -U trio`` (or on Windows, maybe
diff --git a/test-requirements.in b/test-requirements.in
index 86873ec3ed..75e69f2c9b 100644
--- a/test-requirements.in
+++ b/test-requirements.in
@@ -23,15 +23,9 @@ typing-extensions; implementation_name == "cpython"

 # Trio's own dependencies
 cffi; os_name == "nt"
-contextvars; python_version < "3.7"
 attrs >= 19.2.0
 sortedcontainers
 async_generator >= 1.9
 idna
 outcome
 sniffio
-
-# Required by contextvars, but harmless to install everywhere.
-# dependabot drops the contextvars dependency because it runs
-# on 3.7.
-immutables >= 0.6
diff --git a/trio/_core/_entry_queue.py b/trio/_core/_entry_queue.py
index 8f6eb05e3b..9f3301b3d2 100644
--- a/trio/_core/_entry_queue.py
+++ b/trio/_core/_entry_queue.py
@@ -15,8 +15,8 @@ class EntryQueue:
     # not signal-safe. deque is implemented in C, so each operation is atomic
     # WRT threads (and this is guaranteed in the docs), AND each operation is
     # atomic WRT signal delivery (signal handlers can run on either side, but
-    # not *during* a deque operation). dict makes similar guarantees - and on
-    # CPython 3.6 and PyPy, it's even ordered!
+    # not *during* a deque operation). dict makes similar guarantees - and
+    # it's even ordered!
     queue = attr.ib(factory=deque)
     idempotent_queue = attr.ib(factory=dict)
diff --git a/trio/_core/_run.py b/trio/_core/_run.py
index ff90fb6a55..592280abf2 100644
--- a/trio/_core/_run.py
+++ b/trio/_core/_run.py
@@ -1,18 +1,12 @@
-# coding: utf-8
-
 import functools
 import itertools
-import logging
-import os
 import random
 import select
 import sys
 import threading
 from collections import deque
-import collections.abc
 from contextlib import contextmanager
 import warnings
-import weakref
 import enum

 from contextvars import copy_context
@@ -46,7 +40,6 @@ from ._thread_cache import start_thread_soon
 from ._instrumentation import Instruments

 from .. import _core
-from .._deprecate import warn_deprecated
 from .._util import Final, NoPublicConstructor, coroutine_or_error

 if sys.version_info < (3, 11):
@@ -72,13 +65,8 @@ def _public(fn):

 _r = random.Random()

-# On 3.7+, Context.run() is implemented in C and doesn't show up in
-# tracebacks. On 3.6, we use the contextvars backport, which is
-# currently implemented in Python and adds 1 frame to tracebacks. So this
-# function is a super-overkill version of "0 if sys.version_info >= (3, 7)
-# else 1". But if Context.run ever changes, we'll be ready!
-#
-# This can all be removed once we drop support for 3.6.
+# On CPython, Context.run() is implemented in C and doesn't show up in
+# tracebacks. On PyPy, it is implemented in Python and adds 1 frame to tracebacks.
 def _count_context_run_tb_frames():
     def function_with_unique_name_xyzzy():
         1 / 0
@@ -2202,7 +2190,7 @@ def unrolled_run(runner, async_fn, args, host_uses_signal_set_wakeup_fd=False):
             try:
                 # We used to unwrap the Outcome object here and send/throw
                 # its contents in directly, but it turns out that .throw()
-                # is buggy, at least on CPython 3.6:
+                # is buggy, at least before CPython 3.9:
                 # https://bugs.python.org/issue29587
                 # https://bugs.python.org/issue29590
                 # So now we send in the Outcome object and unwrap it on the
diff --git a/trio/_core/_wakeup_socketpair.py b/trio/_core/_wakeup_socketpair.py
index 8115eba3db..8d51419ecc 100644
--- a/trio/_core/_wakeup_socketpair.py
+++ b/trio/_core/_wakeup_socketpair.py
@@ -1,5 +1,4 @@
 import socket
-import sys
 import signal
 import warnings

@@ -7,21 +6,6 @@ from .._util import is_main_thread


-def _has_warn_on_full_buffer():
-    if "__pypy__" not in sys.builtin_module_names:
-        # CPython has warn_on_full_buffer. Don't need to inspect.
-        # Also, CPython doesn't support inspecting built-in functions.
-        return True
-
-    import inspect
-
-    args_spec = inspect.getfullargspec(signal.set_wakeup_fd)
-    return "warn_on_full_buffer" in args_spec.kwonlyargs
-
-
-HAVE_WARN_ON_FULL_BUFFER = _has_warn_on_full_buffer()
-
-
 class WakeupSocketpair:
     def __init__(self):
         self.wakeup_sock, self.write_sock = socket.socketpair()
@@ -35,13 +19,8 @@ def __init__(self):
         # Windows 10: 525347
         # Windows you're weird. (And on Windows setting SNDBUF to 0 makes send
         # blocking, even on non-blocking sockets, so don't do that.)
-        #
-        # But, if we're on an old Python and can't control the signal module's
-        # warn-on-full-buffer behavior, then we need to leave things alone, so
-        # the signal module won't spam the console with spurious warnings.
-        if HAVE_WARN_ON_FULL_BUFFER:
-            self.wakeup_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1)
-            self.write_sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1)
+        self.wakeup_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1)
+        self.write_sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1)
         # On Windows this is a TCP socket so this might matter. On other
         # platforms this fails b/c AF_UNIX sockets aren't actually TCP.
         try:
@@ -72,10 +51,7 @@ def wakeup_on_signals(self):
         if not is_main_thread():
             return
         fd = self.write_sock.fileno()
-        if HAVE_WARN_ON_FULL_BUFFER:
-            self.old_wakeup_fd = signal.set_wakeup_fd(fd, warn_on_full_buffer=False)
-        else:
-            self.old_wakeup_fd = signal.set_wakeup_fd(fd)
+        self.old_wakeup_fd = signal.set_wakeup_fd(fd, warn_on_full_buffer=False)
         if self.old_wakeup_fd != -1:
             warnings.warn(
                 RuntimeWarning(
diff --git a/trio/_core/tests/test_ki.py b/trio/_core/tests/test_ki.py
index 78a0db6237..3636c34d70 100644
--- a/trio/_core/tests/test_ki.py
+++ b/trio/_core/tests/test_ki.py
@@ -490,115 +490,3 @@ async def inner():
         _core.run(inner)
     finally:
         threading._active[thread.ident] = original
-
-
-# For details on why this test is non-trivial, see:
-# https://github.com/python-trio/trio/issues/42
-# https://github.com/python-trio/trio/issues/109
-@slow
-def test_ki_wakes_us_up():
-    assert is_main_thread()
-
-    # This test is flaky due to a race condition on Windows; see:
-    # https://github.com/python-trio/trio/issues/119
-    # https://bugs.python.org/issue30038
-    # I think the only fix is to wait for fixed CPython to be released, so in
-    # the mean time, on affected versions we send two signals (equivalent to
-    # hitting control-C twice). This works because the problem is that the C
-    # level signal handler does
-    #
-    #   write-to-fd -> set-flags
-    #
-    # and we need
-    #
-    #   set-flags -> write-to-fd
-    #
-    # so running the C level signal handler twice does
-    #
-    #   write-to-fd -> set-flags -> write-to-fd -> set-flags
-    #
-    # which contains the desired sequence.
-    #
-    # Affected version of CPython include 3.6.1 and earlier.
-    # It's fixed in 3.6.2 and 3.7+
-    #
-    # PyPy was never affected.
-    #
-    # The problem technically can occur on Unix as well, if a signal is
-    # delivered to a non-main thread, though we haven't observed this in
-    # practice.
-    #
-    # There's also this theoretical problem, but hopefully it won't actually
-    # bite us in practice:
-    # https://bugs.python.org/issue31119
-    # https://bitbucket.org/pypy/pypy/issues/2623
-    import platform
-
-    # lock is only needed to avoid an annoying race condition where the
-    # *second* ki_self() call arrives *after* the first one woke us up and its
-    # KeyboardInterrupt was caught, and then generates a second
-    # KeyboardInterrupt that aborts the test run. The kill_soon thread holds
-    # the lock while doing the calls to ki_self, which means that it holds it
-    # while the C-level signal handler is running. Then in the main thread,
-    # when we're woken up we know that ki_self() has been run at least once;
-    # if we then take the lock it guaranteeds that ki_self() has been run
-    # twice, so if a second KeyboardInterrupt is going to arrive it should
-    # arrive by the time we've acquired the lock. This lets us force it to
-    # happen inside the pytest.raises block.
-    #
-    # It will be very nice when the buggy_wakeup_fd bug is fixed.
-    lock = threading.Lock()
-
-    def kill_soon():
-        # We want the signal to be raised after the main thread has entered
-        # the IO manager blocking primitive. There really is no way to
-        # deterministically interlock with that, so we have to use sleep and
-        # hope it's long enough.
-        time.sleep(1.1)
-        with lock:
-            print("thread doing ki_self()")
-            ki_self()
-
-    async def main():
-        thread = threading.Thread(target=kill_soon)
-        print("Starting thread")
-        thread.start()
-        try:
-            with pytest.raises(KeyboardInterrupt):
-                # To limit the damage on CI if this does get broken (as
-                # compared to sleep_forever())
-                print("Going to sleep")
-                try:
-                    await sleep(20)
-                    print("Woke without raising?!")  # pragma: no cover
-                # The only purpose of this finally: block is to soak up the
-                # second KeyboardInterrupt that might arrive on
-                # buggy_wakeup_fd platforms. So it might get aborted at any
-                # moment randomly on some runs, so pragma: no cover avoids
-                # coverage flapping:
-                finally:  # pragma: no cover
-                    print("waiting for lock")
-                    with lock:
-                        print("got lock")
-                    # And then we want to force a PyErr_CheckSignals. Which is
-                    # not so easy on Windows. Weird kluge: builtin_repr calls
-                    # PyObject_Repr, which does an unconditional
-                    # PyErr_CheckSignals for some reason.
-                    print(repr(None))
-                    # And finally, it's possible that the signal was delivered
-                    # but at a moment when we had KI protection enabled, so we
-                    # need to execute a checkpoint to ensure it's delivered
-                    # before we exit main().
-                    await _core.checkpoint()
-        finally:
-            print("joining thread", sys.exc_info())
-            thread.join()
-
-    start = time.perf_counter()
-    try:
-        _core.run(main)
-    finally:
-        end = time.perf_counter()
-        print("duration", end - start)
-        print("sys.exc_info", sys.exc_info())
-    assert 1.0 <= (end - start) < 2
diff --git a/trio/_core/tests/test_run.py b/trio/_core/tests/test_run.py
index 5c373fe08e..0c563d5310 100644
--- a/trio/_core/tests/test_run.py
+++ b/trio/_core/tests/test_run.py
@@ -1060,7 +1060,7 @@ async def child2():
     ]


-# At least as of CPython 3.6, using .throw() to raise an exception inside a
+# Before CPython 3.9, using .throw() to raise an exception inside a
 # coroutine/generator causes the original exc_info state to be lost, so things
 # like re-raising and exception chaining are broken.
 #
diff --git a/trio/_highlevel_ssl_helpers.py b/trio/_highlevel_ssl_helpers.py
index a339a3d238..19b1ff8777 100644
--- a/trio/_highlevel_ssl_helpers.py
+++ b/trio/_highlevel_ssl_helpers.py
@@ -19,7 +19,6 @@ async def open_ssl_over_tcp_stream(
     *,
     https_compatible=False,
     ssl_context=None,
-    # No trailing comma b/c bpo-9232 (fixed in py36)
     happy_eyeballs_delay=DEFAULT_DELAY,
 ):
     """Make a TLS-encrypted Connection to the given host and port over TCP.
diff --git a/trio/_socket.py b/trio/_socket.py
index 4e4e603726..886f5614f6 100644
--- a/trio/_socket.py
+++ b/trio/_socket.py
@@ -50,7 +50,7 @@ async def __aexit__(self, etype, value, tb):
 try:
     from socket import IPPROTO_IPV6
 except ImportError:
-    # As of at least 3.6, python on Windows is missing IPPROTO_IPV6
+    # Before Python 3.8, Windows is missing IPPROTO_IPV6
     # https://bugs.python.org/issue29515
     if sys.platform == "win32":  # pragma: no branch
         IPPROTO_IPV6 = 41
diff --git a/trio/_util.py b/trio/_util.py
index cbcc6255cd..602df0d7bf 100644
--- a/trio/_util.py
+++ b/trio/_util.py
@@ -266,21 +266,7 @@ def __getitem__(self, _):
         return self


-# If a new class inherits from any ABC, then the new class's metaclass has to
-# inherit from ABCMeta. If a new class inherits from typing.Generic, and
-# you're using Python 3.6, then the new class's metaclass has to
-# inherit from typing.GenericMeta. Some of the classes that want to use Final
-# or NoPublicConstructor inherit from ABCs and generics, so Final has to
-# inherit from these metaclasses. Fortunately, GenericMeta inherits from
-# ABCMeta, so inheriting from GenericMeta alone is sufficient (when it
-# exists at all).
-if not t.TYPE_CHECKING and hasattr(t, "GenericMeta"):
-    BaseMeta = t.GenericMeta
-else:
-    BaseMeta = ABCMeta
-
-
-class Final(BaseMeta):
+class Final(ABCMeta):
     """Metaclass that enforces a class to be final (i.e., subclass not allowed).

     If a class uses this metaclass like this::
diff --git a/trio/socket.py b/trio/socket.py
index 27e75c8dbc..613375ef41 100644
--- a/trio/socket.py
+++ b/trio/socket.py
@@ -188,18 +188,6 @@
 # get names used by Trio that we define on our own
 from ._socket import IPPROTO_IPV6

-# Not defined in all python versions and platforms but sometimes needed
-if not _t.TYPE_CHECKING:
-    try:
-        TCP_NOTSENT_LOWAT
-    except NameError:
-        # Hopefully will show up in 3.7:
-        # https://github.com/python/cpython/pull/477
-        if sys.platform == "darwin":
-            TCP_NOTSENT_LOWAT = 0x201
-        elif sys.platform == "linux":
-            TCP_NOTSENT_LOWAT = 25
-
 if _t.TYPE_CHECKING:
     IP_BIND_ADDRESS_NO_PORT: int
 else:
diff --git a/trio/tests/test_ssl.py b/trio/tests/test_ssl.py
index 339ef183a4..fee4063b74 100644
--- a/trio/tests/test_ssl.py
+++ b/trio/tests/test_ssl.py
@@ -59,23 +59,14 @@ TRIO_TEST_1_CERT.configure_cert(SERVER_CTX)
+
 # TLS 1.3 has a lot of changes from previous versions. So we want to run tests
 # with both TLS 1.3, and TLS 1.2.
-if hasattr(ssl, "OP_NO_TLSv1_3"):
-    # "tls13" means that we're willing to negotiate TLS 1.3. Usually that's
-    # what will happen, but the renegotiation tests explicitly force a
-    # downgrade on the server side. "tls12" means we refuse to negotiate TLS
-    # 1.3, so we'll almost certainly use TLS 1.2.
-    client_ctx_params = ["tls13", "tls12"]
-else:
-    # We can't control whether we use TLS 1.3, so we just have to accept
-    # whatever openssl wants to use. This might be TLS 1.2 (if openssl is
-    # old), or it might be TLS 1.3 (if openssl is new, but our python version
-    # is too old to expose the configuration knobs).
-    client_ctx_params = ["default"]
-
-
-@pytest.fixture(scope="module", params=client_ctx_params)
+# "tls13" means that we're willing to negotiate TLS 1.3. Usually that's
+# what will happen, but the renegotiation tests explicitly force a
+# downgrade on the server side. "tls12" means we refuse to negotiate TLS
+# 1.3, so we'll almost certainly use TLS 1.2.
+@pytest.fixture(scope="module", params=["tls13", "tls12"])
 def client_ctx(request):
     ctx = ssl.create_default_context()
diff --git a/trio/tests/test_threads.py b/trio/tests/test_threads.py
index 47727817e6..11fc34c131 100644
--- a/trio/tests/test_threads.py
+++ b/trio/tests/test_threads.py
@@ -300,8 +300,8 @@ async def test_run_in_worker_thread_limiter(MAX, cancel, use_default_limiter):
     try:
         # We used to use regular variables and 'nonlocal' here, but it turns
        # out that it's not safe to assign to closed-over variables that are
-        # visible in multiple threads, at least as of CPython 3.6 and PyPy
-        # 5.8:
+        # visible in multiple threads, at least as of CPython 3.10 and PyPy
+        # 7.3:
         #
         # https://bugs.python.org/issue30744
         # https://bitbucket.org/pypy/pypy/issues/2591/
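
Note on the _wakeup_socketpair.py change above: with 3.7 as the minimum version, signal.set_wakeup_fd() always accepts the warn_on_full_buffer keyword, so the HAVE_WARN_ON_FULL_BUFFER probe can go away. The sketch below is illustrative only, not Trio's actual code (install_wakeup_fd and remove_wakeup_fd are made-up helper names); it just shows the underlying wakeup-fd pattern that the simplified WakeupSocketpair relies on.

    # Illustrative sketch of the wakeup-fd pattern; not Trio's implementation.
    import signal
    import socket

    def install_wakeup_fd():
        # Like WakeupSocketpair.wakeup_on_signals(), this must run in the
        # main thread, because that's where set_wakeup_fd is allowed.
        wakeup_sock, write_sock = socket.socketpair()
        for sock in (wakeup_sock, write_sock):
            sock.setblocking(False)
        # On 3.7+ warn_on_full_buffer is always available; pass False so the
        # interpreter doesn't warn when the buffer fills up, since the event
        # loop is expected to drain wakeup_sock itself.
        old_fd = signal.set_wakeup_fd(write_sock.fileno(), warn_on_full_buffer=False)
        return wakeup_sock, write_sock, old_fd

    def remove_wakeup_fd(wakeup_sock, write_sock, old_fd):
        # Restore whatever wakeup fd was installed before, then clean up.
        signal.set_wakeup_fd(old_fd)
        wakeup_sock.close()
        write_sock.close()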
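The reference-io.rst hunk keeps the advice to prefer ssl.create_default_context() over instantiating ssl.SSLContext directly. As a rough sketch of what that looks like with Trio (the open_tls_stream helper name here is made up; trio.open_ssl_over_tcp_stream() already wraps this pattern up for you):

    # Illustrative sketch of the documented pattern, not Trio's own code.
    import ssl
    import trio

    async def open_tls_stream(host, port):
        # create_default_context() picks secure defaults (certificate
        # verification, hostname checking), unlike a bare ssl.SSLContext().
        ssl_context = ssl.create_default_context()
        tcp_stream = await trio.open_tcp_stream(host, port)
        return trio.SSLStream(tcp_stream, ssl_context, server_hostname=host)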