From 063bf70caa8288b415f279614c48ca1163782691 Mon Sep 17 00:00:00 2001 From: "H. Phil Duby" Date: Sat, 5 Oct 2024 21:15:51 -0600 Subject: [PATCH 1/7] Add non-blocking mode, notifications, retry, unittests. --- .pre-commit-config.yaml | 8 +- .pylintrc | 4 +- adafruit_ntp.py | 563 ++++++++++-- examples/ntp_cpython.py | 1 - examples/ntp_powersave.py | 90 ++ tests/__init__.py | 0 tests/mocks/__init__.py | 0 tests/mocks/mock_pool.py | 263 ++++++ tests/mocks/mock_time.py | 157 ++++ tests/ntp_testing_support.py | 1176 +++++++++++++++++++++++++ tests/shared_for_testing.py | 283 ++++++ tests/simulate_ntp_packet.py | 261 ++++++ tests/test_eventtype.py | 99 +++ tests/test_mock.py | 1434 +++++++++++++++++++++++++++++++ tests/test_ntp.py | 971 +++++++++++++++++++++ tests/test_ntp_instantiation.py | 190 ++++ tests/test_ntp_registration.py | 268 ++++++ 17 files changed, 5688 insertions(+), 80 deletions(-) create mode 100644 examples/ntp_powersave.py create mode 100644 tests/__init__.py create mode 100644 tests/mocks/__init__.py create mode 100644 tests/mocks/mock_pool.py create mode 100644 tests/mocks/mock_time.py create mode 100644 tests/ntp_testing_support.py create mode 100644 tests/shared_for_testing.py create mode 100644 tests/simulate_ntp_packet.py create mode 100644 tests/test_eventtype.py create mode 100644 tests/test_mock.py create mode 100644 tests/test_ntp.py create mode 100644 tests/test_ntp_instantiation.py create mode 100644 tests/test_ntp_registration.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 70ade69..98790bc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,21 +4,21 @@ repos: - repo: https://github.com/python/black - rev: 23.3.0 + rev: 24.8.0 hooks: - id: black - repo: https://github.com/fsfe/reuse-tool - rev: v1.1.2 + rev: v4.0.3 hooks: - id: reuse - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.6.0 hooks: - id: check-yaml - id: end-of-file-fixer - id: trailing-whitespace - repo: 
https://github.com/pycqa/pylint - rev: v2.17.4 + rev: v3.2.6 hooks: - id: pylint name: pylint (library code) diff --git a/.pylintrc b/.pylintrc index f945e92..9425693 100644 --- a/.pylintrc +++ b/.pylintrc @@ -19,7 +19,7 @@ ignore-patterns= # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). -#init-hook= +init-hook='import sys;sys.path.insert(0,"./lib")' # Use multiple processes to speed up Pylint. jobs=1 @@ -159,7 +159,7 @@ ignored-classes=optparse.Values,thread._local,_thread._local # (useful for modules/projects where namespaces are manipulated during runtime # and thus existing member attributes cannot be deduced by static analysis. It # supports qualified module names, as well as Unix pattern matching. -ignored-modules=board +ignored-modules=board,socketpool # Show a hint with possible names when a member name was not found. The aspect # of finding the hint is based on edit distance. diff --git a/adafruit_ntp.py b/adafruit_ntp.py index 9dc7476..31a0c39 100644 --- a/adafruit_ntp.py +++ b/adafruit_ntp.py @@ -8,7 +8,7 @@ Network Time Protocol (NTP) helper for CircuitPython - * Author(s): Scott Shawcroft + * Author(s): Scott Shawcroft, H Phil Duby Implementation Notes -------------------- @@ -19,127 +19,544 @@ https://github.com/adafruit/circuitpython/releases """ +from errno import ETIMEDOUT import struct import time -from micropython import const +try: + from time import gmtime as localtime # use gmtime with CPython +except ImportError: + from time import localtime # gmtime not available in CircuitPython +try: + import socket as SocketPool +except ImportError: + from socketpool import SocketPool # type:ignore +try: + from micropython import const + import circuitpython_typing +except ImportError: + pass +try: + from typing import Callable, Optional, Union, Tuple, Dict +except ImportError: + pass +try: + from typing import Protocol +except ImportError: + try: + from typing_extensions import Protocol + except ImportError: + 
Protocol = None +if Protocol is not None: + + class SocketProtocol(Protocol): + """ + Interface for socket needed by NTP class. Allow IDE static type checking and auto + complete. + """ + + # pylint:disable=redefined-builtin,missing-function-docstring + def settimeout(self, seconds: int) -> None: ... + def sendto( + self, bytes: circuitpython_typing.ReadableBuffer, address: Tuple[str, int] + ) -> int: ... + def recv_into( + self, buffer: circuitpython_typing.WriteableBuffer, bufsize: int + ) -> int: ... __version__ = "0.0.0+auto.0" __repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_NTP.git" -NTP_TO_UNIX_EPOCH = 2208988800 # 1970-01-01 00:00:00 -PACKET_SIZE = const(48) +NTP_TO_UNIX_EPOCH: int = 2_208_988_800 # 1900-01-01 00:00:00 to 1970-01-01 00:00:00 +NS_PER_SEC: int = 1_000_000_000 +PACKET_SIZE: int = 48 + + +# No enum module in CircuitPython, so implement simple version. +class _IntFlag: # pylint:disable=too-few-public-methods + """Simple private implementation of an IntFlag enum. 
+ + This contains hard-coded limits specific to the EventType class.""" + + def __init__(self, value=0): + if isinstance(value, _IntFlag): + self.value = value.value + else: + self.value = value + + def __and__(self, other): + if isinstance(other, _IntFlag): + return _IntFlag(self.value & other.value) + return self.value & other + + def __or__(self, other): + if isinstance(other, _IntFlag): + return _IntFlag(self.value | other.value) + return self.value | other + + def __xor__(self, other): + if isinstance(other, _IntFlag): + return _IntFlag(self.value ^ other.value) + return self.value ^ other + + def __add__(self, other): + return self.__or__(other) + + def __gt__(self, other): + if isinstance(other, _IntFlag): + return self.value > other.value + return self.value > other + + def __lt__(self, other): + if isinstance(other, _IntFlag): + return self.value < other.value + return self.value < other + + def __invert__(self): + # This relies on ALL_EVENTS existing, and being set correctly. + all_flags = EventType.ALL_EVENTS.value + return _IntFlag(~self.value & all_flags) + + def __repr__(self): + return f"IntFlag({self.value})" + + def __eq__(self, other): + if isinstance(other, _IntFlag): + return self.value == other.value + return self.value == other + + def __bool__(self): + return bool(self.value) + + @classmethod + def from_value(cls, value): + """enum emulation""" + return cls(value) + + +class EventType(_IntFlag): # pylint:disable=too-few-public-methods + """NTP callback notification Event Types.""" + + NO_EVENT = _IntFlag(0b000) + SYNC_COMPLETE = _IntFlag(0b001) + SYNC_FAILED = _IntFlag(0b010) # get packet failed + LOOKUP_FAILED = _IntFlag(0b100) # getaddrinfo failed (not timedout?) + # The custom _IntFlat class used uses the ALL_EVENTS attribute here to get a bit mask to use + # when inverting flag values. Make sure to update this if changing how many event types are + # being used. 
+ ALL_EVENTS = _IntFlag(0b111) + + +class NTPIncompleteError(TimeoutError): + """ + NTP synchronize has never completed successfully yet. + + Raised when an NTP operation cannot complete in non-blocking mode or during retries. + + For transient errors and partial progress. Further NTP calls will retry or + continue to the next step. + """ class NTP: # pylint:disable=too-many-instance-attributes """Network Time Protocol (NTP) helper module for CircuitPython. This module does not handle daylight savings or local time. It simply requests - UTC from a NTP server. + UTC from an NTP server. + + This class uses a simple state machine to manage synchronization: + - USING_CACHED_REFERENCE (state 3): The default state where the cached time reference is used. + - Transitions to GETTING_SOCKET when the cache expires. + - GETTING_SOCKET (state 1): Attempts to perform a DNS lookup for the NTP server. + - Transitions to GETTING_PACKET on success. + - Remains in this state if retries are needed. + - GETTING_PACKET (state 2): Sends an NTP request and waits for the response. + - Transitions back to USING_CACHED_REFERENCE. + - On failure, any existing cached value will continue to be used until the next scheduled + synchronization. + + The state transitions are managed by the `_update_time_sync` method, which is called if + the cached time is expired when `utc_ns` is accessed. 
""" + # State machine values + GETTING_SOCKET: int = const(1) # To have getting packet + GETTING_PACKET: int = const(2) # To cached + USING_CACHED_REFERENCE: int = const(3) # To getting socket + # Exponential retry backoff configuration + FIRST_WAIT = int(3 * NS_PER_SEC) # 3 seconds in nanoseconds + """Time to wait before retry after first (network operation) failure.""" + MAX_WAIT = int(60 * NS_PER_SEC) # maximum wait time in nanoseconds + """Maximum time to wait before retry after a failure.""" + WAIT_FACTOR = const(1.5) # multiplier for exponential backoff + """Amount (multiplication factor) to increase the wait before retry after each failure.""" + + MINIMUM_RETRY_DELAY: int = const(60) # 60 seconds + """A (specified or default) cache_seconds value of 0 is intended to mean using the NTP + pooling interval returned by the server. However, that also means that no value is known + until a successful synchronization. If the initial attempts get timeout errors, + because the server is busy, that 0 value results in the already busy server (or network) + getting pounded by an unbroken stream of retry requests. Limit that possibility.""" + def __init__( # pylint:disable=too-many-arguments self, - socketpool, + socketpool: SocketPool, *, server: str = "0.adafruit.pool.ntp.org", port: int = 123, tz_offset: float = 0, socket_timeout: int = 10, cache_seconds: int = 0, + blocking: bool = True, ) -> None: """ :param object socketpool: A socket provider such as CPython's `socket` module. - :param str server: The domain of the ntp server to query. + :param str server: The domain (url) of the ntp server to query. :param int port: The port of the ntp server to query. :param float tz_offset: Timezone offset in hours from UTC. Only useful for timezone ignorant CircuitPython. CPython will determine timezone automatically and adjust (so don't use this.) For example, Pacific daylight savings time is -7. :param int socket_timeout: UDP socket timeout, in seconds. 
- :param int cache_seconds: how many seconds to use a cached result from NTP server + :param int cache_seconds: How many seconds to use a cached result from NTP server (default 0, which respects NTP server's minimum). + :param bool blocking: Determines whether the NTP operations should be blocking or + non-blocking. """ - self._pool = socketpool - self._server = server - self._port = port - self._socket_address = None + self._pool: SocketPool = socketpool + self._server: str = server + self._port: int = port + self._socket_address: Optional[Tuple[str, int]] = None self._packet = bytearray(PACKET_SIZE) - self._tz_offset = int(tz_offset * 60 * 60) - self._socket_timeout = socket_timeout - self._cache_seconds = cache_seconds + self._tz_offset_ns = int(tz_offset * 60 * 60) * NS_PER_SEC + self._socket_timeout: int = socket_timeout + self._cache_ns: int = NS_PER_SEC * max(self.MINIMUM_RETRY_DELAY, cache_seconds) + self._blocking: bool = blocking + + # State management + + # This is our estimated start time for the monotonic clock (ns since unix epoch). + # We adjust it based on the ntp responses. + self._monotonic_start_ns: int = 0 + self._next_sync: int = 0 + self._state: int = self.USING_CACHED_REFERENCE + self._last_sync_time: int = 0 # Track the last successful sync time + self._callbacks: Dict[Callable[[_IntFlag, int], None], _IntFlag] = {} + # The variables _next_rate_limit_end and _next_rate_limit_delay are intentionally + # not initialized here because they are only used after calling + # _initialize_backoff_timing(). Accessing them before initialization will raise an + # AttributeError, indicating a misuse of the class. + self._next_rate_limit_end: int + self._next_rate_limit_delay: int + + @property + def datetime(self) -> time.struct_time: + """ + Time (structure) based on NTP sever time reference. Time synchronization is updated + if the cache has expired. + + CircuitPython always expects to be working with UTC. 
CPython though will use it's own + notion of the current time zone when using localtime. To get those to be consistent, + localtime is overridden (during import) to use gmtime when running in CPython. That + way this should always return UTC information. + + :returns time.struct_time: current UTC time in seconds. + """ + current_time_s = self.utc_ns // NS_PER_SEC # seconds since unix epoch + return localtime(current_time_s) + + @property + def utc_ns(self) -> int: + """UTC (unix epoch) time in nanoseconds based on NTP sever time reference. + Time synchronization updated if the cache has expired. + + :returns: integer number of nanoseconds since the unix epoch (1970-01-01 00:00:00). + :raises NTPIncompleteError: if no NTP synchronization has been successful yet. + """ + if time.monotonic_ns() > self._next_sync: + self._update_time_sync() + # if self._last_sync_time <= 0: + if self._monotonic_start_ns <= 0: + raise NTPIncompleteError("No NTP synchronization has been successful yet") + return time.monotonic_ns() + self._monotonic_start_ns + + @property + def blocking(self) -> bool: + """Return the current blocking setting.""" + return self._blocking + + @blocking.setter + def blocking(self, value: bool) -> None: + """Update the current blocking mode setting""" + self._blocking = value - # This is our estimated start time for the monotonic clock. We adjust it based on the ntp - # responses. - self._monotonic_start_ns = 0 + @property + def since_sync_ns(self) -> int: + """Return the duration in nanoseconds since the last successful synchronization.""" + if self._next_sync == 0: + raise NTPIncompleteError("No NTP synchronization has been successful yet") + return time.monotonic_ns() - self._last_sync_time - self.next_sync = 0 + @property + def cache_ns(self) -> int: + """Return the current cache value in nanoseconds.""" + return self._cache_ns def _update_time_sync(self) -> None: - """Update the time sync value. 
Raises OSError exception if no response - is received within socket_timeout seconds, ArithmeticError for substantially incorrect - NTP results.""" - if self._socket_address is None: - self._socket_address = self._pool.getaddrinfo(self._server, self._port)[0][ - 4 - ] - - self._packet[0] = 0b00100011 # Not leap second, NTP version 4, Client mode + """ + Manage NTP synchronization state transition continue and retry logic. + + Handles synchronization by progressing through the following states: + - USING_CACHED_REFERENCE: Initialized retry backoff timing. + - GETTING_SOCKET: Perform DNS lookup for the NTP server. + - GETTING_PACKET: Send NTP request and await response. + """ + self._notify_ntp_event_callbacks(EventType.NO_EVENT, -1) + if self._state == self.USING_CACHED_REFERENCE: + # Cached offset value expired, reinitialize backoff timing and proceed to DNS lookup. + self._initialize_backoff_timing() + self._state = self.GETTING_SOCKET + if self._state == self.GETTING_SOCKET: + # Attempt DNS lookup; if non-blocking, exit early. + self._get_socket() + if not self._blocking: + return + if self._state == self.GETTING_PACKET: + self._get_ntp_packet() + + def _initialize_backoff_timing(self) -> None: + """Initialize backoff timing values.""" + self._next_rate_limit_delay = self.FIRST_WAIT + self._next_rate_limit_end = 0 # time.monotonic_ns + + def _get_socket(self) -> None: + """Get the socket address for the NTP server.""" + now = time.monotonic_ns() + if now < self._next_rate_limit_end: + if not self._blocking: + # Not blocking and a rate limiting delay is in progress, so no + # operation to do currently. + return + # Wait here until the rate limiting delay has expired then continue. + time.sleep((self._next_rate_limit_end - now) / NS_PER_SEC) + self._server_dns_lookup() + + def _server_dns_lookup(self) -> None: + """ + Get the IP address for the configured NTP server. 
+ + In CircuitPython, exceptions raised during DNS lookup do not differentiate between + connection problems and DNS failures. Therefore, we treat any exception here as a + LOOKUP_FAILED event and rely on external logic, which may have more context + information, to handle network connectivity. + """ + try: + # List[Tuple[int, int, int, str, Tuple[str, int]]] + # List[Tuple[AddressFamily, SocketKind, int, str, Tuple[str, int]]] + new_socket = self._pool.getaddrinfo(self._server, self._port)[0][4] + self._socket_address = new_socket + self._state = self.GETTING_PACKET + except SocketPool.gaierror: + self._exponential_lookup_retry_backoff() + if self._blocking and self._last_sync_time <= 0: + # Legacy blocking mode and do not have any fallback time sync offset. The caller + # needs to handle the specific failure exception. + raise + # otherwise continue normally + + def register_ntp_event_callback( + self, + callback: Callable[[_IntFlag, int], None], + event_types: Union[_IntFlag, int] = EventType.SYNC_COMPLETE, + ) -> None: + """ + Register a callback to be notified for specific NTP events. + + Callbacks can be used to turn off the radio to save power, initiate a network + connection, or other progress monitoring processes. + EG: wifi.radio.enabled = False or connection_manager.connect() + + NOTE: This implementation does not prevent duplicate registration of the same callback. + All attempts to consistently identify when a callback is already registered have + failed due to the limitations of the current circuitpython implementation. Comparing + the callback value directly, converting to string using str(), or repr(), or to a + number using id() all have cases where an identical callback reference will be + treated as different. + + If the same callback is registered multiple times, with the same event type, it will + be called multiple times when that event type occurs. + + :param Callable[[IntFlag, int], None] callback: The callback function to register. 
+ :param IntFlag event_types: The event types that should trigger this callback. This can + be a single event type or a combination of multiple events. + Defaults to EventType.SYNC_COMPLETE. + :raises TypeError: If the event_types argument is not a valid event type or combination of + event types. + + Usage examples: + from adafruit_ntp import NTP, EventType + ntp = NTP(socketpool) + + def on_sync_complete(event_type: IntFlag, next_time: int) -> None: + print(f"{event_type.name} event: Next operation scheduled at {next_time} ns") + + # Default behavior, only triggers on sync complete + ntp.register_ntp_event_callback(on_sync_complete) + + def on_ntp_event(event_type: IntFlag, next_time: int) -> None: + if event_type == EventType.SYNC_COMPLETE: + print(f"Synchronization complete. Next sync at {next_time}") + elif event_type == EventType.SYNC_FAILED: + print(f"Sync failed. Will retry at {next_time}") + elif event_type == EventType.LOOKUP_FAILED: + print(f"DNS lookup failed, need to verify active network connection.") + + # Register for multiple events for a single callback + ntp.register_ntp_event_callback(on_ntp_event, EventType.SYNC_COMPLETE | \\ + EventType.SYNC_FAILED | EventType.LOOKUP_FAILED) + """ + if not isinstance(event_types, (_IntFlag, int)): + raise TypeError(f"{type(event_types)} is not compatible with event types") + if not ( + (EventType.ALL_EVENTS | event_types) == EventType.ALL_EVENTS + or event_types == EventType.NO_EVENT + ): + events_value = ( + event_types if isinstance(event_types, int) else event_types.value + ) + raise TypeError( + f"Invalid event type mask 0b{events_value:b}. " + "Only known events can receive notifications." + ) + self._callbacks[callback] = event_types + + def _notify_ntp_event_callbacks( + self, event_type: _IntFlag, next_operation_time: int + ) -> None: + """ + Call all registered callbacks that are interested in the given event type. + + :param IntFlag event_type: The type of event that occurred. 
+ :param int next_operation_time: The time (in nanoseconds) when the next operation is + scheduled. + """ + for callback, registered_events in self._callbacks.items(): + if event_type & registered_events: + callback(event_type, next_operation_time) + + def _get_ntp_packet(self) -> None: + """ + Send the NTP packet and process the response to synchronize the local clock. + + Adjusts the local clock based on server-provided timestamps. + """ + # Prepare the NTP request packet: NTP version 4, Client mode, not leap second + self._packet[0] = 0b00100011 for i in range(1, PACKET_SIZE): self._packet[i] = 0 + + # Open a socket to send the packet and receive the response with self._pool.socket(self._pool.AF_INET, self._pool.SOCK_DGRAM) as sock: - sock.settimeout(self._socket_timeout) - local_send_ns = time.monotonic_ns() # expanded - sock.sendto(self._packet, self._socket_address) - sock.recv_into(self._packet) - # Get the time in the context to minimize the difference between it and receiving - # the packet. - local_recv_ns = time.monotonic_ns() # was destination + have_packet, local_send_ns, local_recv_ns = self._get_raw_ntp(sock) + if not have_packet: + return - poll = struct.unpack_from("!B", self._packet, offset=2)[0] + # Extract server receive and send timestamps from the response packet + srv_recv_ns, srv_send_ns = self._extract_ntp_timestamps() - cache_offset_s = max(2**poll, self._cache_seconds) - self.next_sync = local_recv_ns + cache_offset_s * 1_000_000_000 + # Calculate the clock offset using the formula: + # offset = (T2 + T3 - T4 - T1) / 2 + # where: + # - T1: local_send_ns (time request sent) + # - T2: srv_recv_ns (server received time) + # - T3: srv_send_ns (server sent time) + # - T4: local_recv_ns (time response received) + # That offset aligns the midpoint of the local times with the midpoint of the server times. 
+ clock_offset = (srv_recv_ns + srv_send_ns - local_recv_ns - local_send_ns) // 2 - srv_recv_s, srv_recv_f = struct.unpack_from("!II", self._packet, offset=32) - srv_send_s, srv_send_f = struct.unpack_from("!II", self._packet, offset=40) + # Adjust local monotonic clock to synchronize with the NTP server time + self._monotonic_start_ns = clock_offset + self._tz_offset_ns + self._last_sync_time = local_recv_ns + self._next_sync = local_recv_ns + self._cache_ns - # Convert the server times from NTP to UTC for local use - srv_recv_ns = (srv_recv_s - NTP_TO_UNIX_EPOCH) * 1_000_000_000 + ( - srv_recv_f * 1_000_000_000 // 2**32 - ) - srv_send_ns = (srv_send_s - NTP_TO_UNIX_EPOCH) * 1_000_000_000 + ( - srv_send_f * 1_000_000_000 // 2**32 - ) + # Notify registered callbacks that synchronization has completed + self._notify_ntp_event_callbacks(EventType.SYNC_COMPLETE, self._next_sync) - # _round_trip_delay = (local_recv_ns - local_send_ns) - (srv_send_ns - srv_recv_ns) - # Calculate (best estimate) offset between server UTC and board monotonic_ns time - clock_offset = ( - (srv_recv_ns - local_send_ns) + (srv_send_ns - local_recv_ns) - ) // 2 + # Transition back to using the cached reference + self._state = self.USING_CACHED_REFERENCE - self._monotonic_start_ns = clock_offset + self._tz_offset * 1_000_000_000 + def _get_raw_ntp(self, sock: SocketProtocol) -> Tuple[bool, int, int]: + """Send the NTP packet and receive the response. - @property - def datetime(self) -> time.struct_time: - """Current time from NTP server. Accessing this property causes the NTP time request, - unless there has already been a recent request.""" - if time.monotonic_ns() > self.next_sync: - self._update_time_sync() + Timing sensitive, so treat as a single operation. + + If a timeout occurs (errno == ETIMEDOUT), we handle it gracefully by scheduling the next + sync attempt and notifying callbacks. Other exceptions are re-raised for the caller to + handle. 
+ """ + packet_sent = False + sock.settimeout(self._socket_timeout) + local_send_ns = time.monotonic_ns() + try: + sock.sendto(self._packet, self._socket_address) + packet_sent = True + sock.recv_into(self._packet) + # Get the internal clock time reference while still in the context to minimize + # the difference between it and receiving the packet references. + local_recv_ns = time.monotonic_ns() + except OSError as ex: + if isinstance(ex, BrokenPipeError): + if packet_sent: + raise # case not currently handled + # notify as a lookup failure: ie got cached dns lookup when not connected + self._state = self.GETTING_SOCKET + self._exponential_lookup_retry_backoff() + if self._blocking and self._last_sync_time <= 0: + # Legacy blocking mode and do not have any fallback time sync offset. The + # caller needs to handle the specific failure exception. + raise + # otherwise abort the rest of the NTP attempt, and continue when next called. + return (False, -1, -1) - # Calculate the current time based on the current and start monotonic times - current_time_s = ( - time.monotonic_ns() + self._monotonic_start_ns - ) // 1_000_000_000 + if not packet_sent: + raise # other errors not expected for sendto. Not handled here + self._next_sync = time.monotonic_ns() + self._cache_ns + self._state = self.USING_CACHED_REFERENCE + self._notify_ntp_event_callbacks(EventType.SYNC_FAILED, self._next_sync) + if ex.errno == ETIMEDOUT: + return (False, -1, -1) + raise + return True, local_send_ns, local_recv_ns - return time.localtime(current_time_s) + def _extract_ntp_timestamps(self) -> Tuple[int, int]: + """Extract the receive and send timestamps from the NTP packet.""" + srv_recv_s, srv_recv_f = struct.unpack_from("!II", self._packet, offset=32) + srv_send_s, srv_send_f = struct.unpack_from("!II", self._packet, offset=40) - @property - def utc_ns(self) -> int: - """UTC (unix epoch) time in nanoseconds. 
Accessing this property causes the NTP time - request, unless there has already been a recent request. Raises OSError exception if - no response is received within socket_timeout seconds, ArithmeticError for substantially - incorrect NTP results.""" - if time.monotonic_ns() > self.next_sync: - self._update_time_sync() + srv_recv_ns = (srv_recv_s - NTP_TO_UNIX_EPOCH) * NS_PER_SEC + ( + srv_recv_f * NS_PER_SEC // 2**32 + ) + srv_send_ns = (srv_send_s - NTP_TO_UNIX_EPOCH) * NS_PER_SEC + ( + srv_send_f * NS_PER_SEC // 2**32 + ) + return srv_recv_ns, srv_send_ns - return time.monotonic_ns() + self._monotonic_start_ns + def _exponential_lookup_retry_backoff(self) -> None: + """ + Setup when the next lookup retry should occur due to a DNS lookup failure, or a + BrokenPipeError failure sending the NTP information request. + + This implementation uses an exponential backoff strategy with a maximum wait time + """ + # Configure next rate limiting delay period. This 'should' be the same regardless + # of any processing based on the detected specific exception case. + self._next_rate_limit_end = time.monotonic_ns() + self._next_rate_limit_delay + self._next_rate_limit_delay = min( + self.MAX_WAIT, int(self._next_rate_limit_delay * self.WAIT_FACTOR) + ) + # It does not seem possible to separate connection problems and DNS lookup failures + # in circuitpython. Notify as a lookup failure, but any notified external functionality + # should initially treat it as a connection problem, then ignore it, if it determines + # that a network connection is already available. + self._notify_ntp_event_callbacks( + EventType.LOOKUP_FAILED, self._next_rate_limit_end + ) diff --git a/examples/ntp_cpython.py b/examples/ntp_cpython.py index e01a3b6..c07dcaa 100644 --- a/examples/ntp_cpython.py +++ b/examples/ntp_cpython.py @@ -8,7 +8,6 @@ import adafruit_ntp -# Don't use tz_offset kwarg with CPython because it will adjust automatically. 
ntp = adafruit_ntp.NTP(socket, cache_seconds=3600) while True: diff --git a/examples/ntp_powersave.py b/examples/ntp_powersave.py new file mode 100644 index 0000000..fe52c6b --- /dev/null +++ b/examples/ntp_powersave.py @@ -0,0 +1,90 @@ +# SPDX-FileCopyrightText: 2024 H PHil Duby +# SPDX-License-Identifier: MIT + +"""Print out time based on NTP using power saving options between updates.""" + +import os +import time + +import socketpool +import wifi + +from adafruit_ntp import NTP, EventType, NTPIncompleteError + +print("Start PowerSave example") +check_connection = False # pylint:disable=invalid-name +# Get wifi AP credentials from a settings.toml file +wifi_ssid = os.getenv("CIRCUITPY_WIFI_SSID") +wifi_password = os.getenv("CIRCUITPY_WIFI_PASSWORD") +# print(f'have credentials: {time.monotonic_ns() =}') # DEBUG + + +def on_ntp_event(event_type: EventType, next_time: int): + """Handle notifications from NTP about not using the radio for awhile.""" + global check_connection # pylint:disable=global-statement + if event_type == EventType.NO_EVENT: + print( + "No Event: " + f"{time.monotonic_ns() =}, {wifi.radio.enabled =}, {wifi.radio.connected =}" + ) + return + print(f"event {event_type}: Next operation scheduled at {next_time} ns") + if event_type == EventType.LOOKUP_FAILED: + check_connection = True + return + wifi.radio.enabled = False + if event_type == EventType.SYNC_FAILED: + raise RuntimeError("NTP sync failed") + + +def check_wifi_connection(): + """Check if the WiFi connection is still (currently) active.""" + global check_connection # pylint:disable=global-statement + # Always reset the flag to False. Another notification will set it true again as needed. 
+ check_connection = False + # print(f'checking connection: {time.monotonic_ns() =}, {wifi.radio.enabled =}') # DEBUG + if not wifi.radio.enabled: + wifi.radio.enabled = True + if wifi.radio.connected: + return + print("Connecting to WiFi...") + if wifi_ssid is None: + print("WiFi credentials are kept in settings.toml, please add them there!") + raise ValueError("SSID not found in environment variables") + + try: + wifi.radio.connect(wifi_ssid, wifi_password) + except ConnectionError as ex: + print(f"Failed to connect to WiFi with provided credentials: {ex}") + # print(f'done connect attempt: {time.monotonic_ns() =}') # DEBUG + + +def fmt_iso(datetime: time.struct_time) -> str: + """Format the datetime as ISO 8601.""" + return ( + f"{datetime.tm_year}-{datetime.tm_mon:02d}-{datetime.tm_mday:02d}" + + f"T{datetime.tm_hour:02d}:{datetime.tm_min:02d}:{datetime.tm_sec:02d}" + ) + + +pool = socketpool.SocketPool(wifi.radio) +ntp = NTP(pool, blocking=False) +# ntp.register_ntp_event_callback(on_ntp_event, EventType.SYNC_COMPLETE | +# EventType.SYNC_FAILED | EventType.LOOKUP_FAILED) +ntp.register_ntp_event_callback(on_ntp_event, EventType.ALL_EVENTS) +# ntp.register_ntp_event_callback(on_ntp_event, 0b111) # == 7 == 0x7 + +while True: + if check_connection: + check_wifi_connection() + else: + try: + print(fmt_iso(ntp.datetime)) + except NTPIncompleteError: + print("Waiting for NTP information") + except Exception as ex: + print(f"{type(ex)}") + print(f"Exception: {ex}") + raise + # other regular processing … + time.sleep(1) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/mocks/__init__.py b/tests/mocks/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/mocks/mock_pool.py b/tests/mocks/mock_pool.py new file mode 100644 index 0000000..1d558fb --- /dev/null +++ b/tests/mocks/mock_pool.py @@ -0,0 +1,263 @@ +# SPDX-FileCopyrightText: 2024 H PHil Duby +# +# SPDX-License-Identifier: MIT + 
+""" +Mock code to simulate Socket and SocketPool classes with the functionality needed for +testing adafruit_ntp. + +Usage: + from tests.mock_pool import MockPool, MockCallback, MockSocket + mock_pool = MockPool() + mock_pool.mock_getaddrinfo_attempts.clear() + mock_pool.set_getaddrinfo_responses([«exceptions and values»]) + mock_socket = mock_pool.socket(mock_pool.AF_INET, mock_pool.SOCK_DGRAM) + mock_socket.mock_recv_into_attempts.clear() + mock_socket.mock_sendto_attempts.clear() + mock_socket.set_recv_into_responses([«exceptions and values»]) + mock_socket.set_sendto_responses([«exceptions and values»]) + + ntp = NTP(mock_pool) + + Values in the response lists will be used (popped) one at a time for each call to + getaddrinfo and recv_into. The value will be raised if it is an Exception, or returned + if it is not. This allows multiple responses to be queued for cases where the application + being tested could make multiple calls before exiting back to the testing code. +""" + +try: + from typing import Tuple, Union, Dict, List + + GetAddressInfoT = Tuple[int, int, int, str, Tuple[str, int]] +except ImportError: + pass +from micropython import const # type:ignore +from tests.shared_for_testing import ( + ResourcesDepletedError, + setup_logger, + MOCK_LOGGER, + MOCKED_SOCK_NEW_MSG, + MOCKED_SOCK_INITIALIZED_MSG, + MOCKED_SOCK_INIT_MSG, + MOCKED_SOCK_ENTER_MSG, + MOCKED_SOCK_EXIT_MSG, + MOCKED_SOCK_SETTIMEOUT_MSG, + MOCKED_SOCK_SENDTO_MSG, + MOCKED_SOCK_RECV_INTO_MSG, + MOCKED_SOCK_CLOSE_MSG, + MOCKED_SOCK_SUPER_MSG, + MOCKED_POOL_NEW_MSG, + MOCKED_POOL_INITIALIZED_MSG, + MOCKED_POOL_INIT_MSG, + MOCKED_POOL_INIT_CLEARED_MSG, + MOCKED_POOL_GETADDR_MSG, + MOCKED_POOL_SOCKET_MSG, + MOCKED_NO_RESOURCE_MSG, + MOCKED_CALLBACK_MSG, +) + +_logger = setup_logger(MOCK_LOGGER) + + +class MockSocket: + """Mock Socket for testing NTP + + Use set_recv_into_responses in unittests to configure recv_into behaviour for an + individual instance. 
Only a single instance will be created (enforced by the singleton + pattern in __new__) for each unique combination of the family, type, and proto arguments. + That allows the controlling unittest code to configure an instance for the arguments that + the application will use, and then the application will get that same instance. + """ + + _instances: Dict[Tuple[int, int, int], "MockSocket"] = {} + + def __new__( + cls, family: int, type: int, proto: int, *args, **kwargs + ): # pylint:disable=redefined-builtin + """Control the creation of instances to enforce singleton pattern.""" + key = (family, type, proto) + _logger.info(MOCKED_SOCK_NEW_MSG % (key,)) + if key not in cls._instances: + _logger.info(MOCKED_SOCK_SUPER_MSG % (key,)) + cls._instances[key] = super(MockSocket, cls).__new__(cls, *args, **kwargs) + return cls._instances[key] + + def __init__( + self, family: int, type: int, proto: int + ): # pylint:disable=redefined-builtin + """simulate Socket""" + if not hasattr(self, "_mock_initialized"): + self.family = family + self.type = type + self.proto = proto + self.timeout = None + self.mock_recv_into_attempts: List[Union[Exception, bytearray]] = [] + self.mock_sendto_attempts: List[Union[Exception, int]] = [] + self.mock_send_packet: List[bytearray] = [] + _logger.info(MOCKED_SOCK_INITIALIZED_MSG % (self.mock_key,)) + self._mock_initialized = ( + True # Prevent re-initialization after setup by test case + ) + _logger.info(MOCKED_SOCK_INIT_MSG % (self.mock_key,)) + + def __enter__(self): + """simulate Socket.__enter__. For use as a context manager""" + _logger.info(MOCKED_SOCK_ENTER_MSG % (self.mock_key,)) + return self + + def __exit__(self, exc_type, exc_value, traceback): + """simulate Socket.__exit__. 
For use as a context manager""" + _logger.info(MOCKED_SOCK_EXIT_MSG % (self.mock_key,)) + + def settimeout(self, timeout: int) -> None: + """simulate Socket.settimeout""" + _logger.info(MOCKED_SOCK_SETTIMEOUT_MSG, timeout, self.mock_key) + self.timeout = timeout + + def set_recv_into_responses( + self, responses: List[Union[Exception, bytearray]] + ) -> None: + """Set a sequence of responses for the recv_into method.""" + self.mock_recv_into_attempts = responses + + def set_sendto_responses(self, responses: List[Union[Exception, int]]) -> None: + """Set a sequence of responses for the sendto method.""" + self.mock_sendto_attempts = responses + + def sendto(self, packet: bytearray, address: Tuple[str, int]) -> int: + """simulate Socket.sendto. Sending packet to NTP server address. + + Only expected to be single call to sendto from the application. More will continue + to append to the mock_send_packet bytearray, so make sure that is cleaned up before + the application tries to reuse it with recv_into. + """ + _logger.info(MOCKED_SOCK_SENDTO_MSG, address, self.mock_key) + if not self.mock_sendto_attempts: + raise ResourcesDepletedError(MOCKED_NO_RESOURCE_MSG % "sendto") + result: Union[Exception, int] = self.mock_sendto_attempts.pop(0) + if isinstance(result, Exception): + raise result + self.mock_send_packet.append(packet[:]) # better alternative? + return result + + def recv_into(self, packet: bytearray) -> int: + """simulate Socket.recv_into. Receiving packet from NTP server""" + _logger.info(MOCKED_SOCK_RECV_INTO_MSG % (self.mock_key,)) + if not self.mock_recv_into_attempts: + raise ResourcesDepletedError(MOCKED_NO_RESOURCE_MSG % "recv_into") + result: Union[Exception, bytearray] = self.mock_recv_into_attempts.pop(0) + if isinstance(result, Exception): + raise result + packet[:] = result + return len(packet) + + def close(self) -> None: + """simulate Socket.close. 
Receiving packet from NTP server""" + _logger.info(MOCKED_SOCK_CLOSE_MSG % (self.mock_key,)) + + @property + def mock_key(self) -> Tuple[int, int, int]: + """Return a key for the instance based on the family, type, and proto attributes""" + return (self.family, self.type, self.proto) + + @classmethod + def get_mock_instance(cls) -> Dict[Tuple[int, int, int], "MockSocket"]: + """Get the singleton instance dict for MockSocket.""" + return MockSocket._instances + + @classmethod + def clear_mock_singleton(cls) -> None: + """Testing interface to reset the singleton to 'empty'""" + cls._instances.clear() + + +class MockPool: + """Singleton class to hold the state of the mock socketPool. + + Use set_getaddrinfo_responses in unittests to configure getaddrinfo behaviour. + """ + + AF_INET: int = const(2) + SOCK_DGRAM: int = const(2) + SOCK_STREAM: int = const(1) + IPPROTO_IP: int = const(0) + + _instance: "MockPool" = None + + def __new__(cls, *args, **kwargs): + _logger.debug(MOCKED_POOL_NEW_MSG) + if cls._instance is None: + _logger.debug(MOCKED_POOL_INITIALIZED_MSG) + cls._instance = super(MockPool, cls).__new__(cls, *args, **kwargs) + return cls._instance + + def __init__(self): + # not really needed for the singleton setup, but define the attributes here (as well), + # so pylint doesn't complain about attributes defined outside of init + _logger.debug(MOCKED_POOL_INIT_MSG) + if not hasattr(self, "_mock_initialized"): + _logger.debug(MOCKED_POOL_INIT_CLEARED_MSG) + self.mock_getaddrinfo_attempts: List[Union[Exception, GetAddressInfoT]] = [] + self._mock_initialized = ( + True # Prevent re-initialization after setup by test case + ) + + def getaddrinfo( + self, server: str, port: int + ) -> Tuple[int, int, int, str, Tuple[str, int]]: + """simulate SocketPool.getaddrinfo""" + _logger.info(MOCKED_POOL_GETADDR_MSG, server, port) + if not self.mock_getaddrinfo_attempts: + raise ResourcesDepletedError(MOCKED_NO_RESOURCE_MSG % "getaddrinfo") + result: Union[Exception, 
GetAddressInfoT] = self.mock_getaddrinfo_attempts.pop( + 0 + ) + if isinstance(result, Exception): + raise result + # SocketPool.gaierror((-2, 'Name or service not known')) + return result + + @staticmethod + def socket( + family: int = AF_INET, + type: int = SOCK_STREAM, # pylint:disable=redefined-builtin + proto: int = IPPROTO_IP, + ) -> MockSocket: + """simulate SocketPool.socket""" + _logger.info(MOCKED_POOL_SOCKET_MSG % (family, type, proto)) + return MockSocket(family, type, proto) + + def set_getaddrinfo_responses( + self, responses: List[Union[Exception, GetAddressInfoT]] + ) -> None: # type:ignore + """Set a sequence of responses for the getaddrinfo method.""" + self.mock_getaddrinfo_attempts = responses + + @classmethod + def get_mock_instance(cls) -> "MockPool": + """Get the singleton instance of MockPool without going through instantiation.""" + if MockPool._instance is None: + raise AttributeError( + "No MockPool instance currently exists. Call MockPool() first." + ) + return MockPool._instance + + @classmethod + def clear_mock_singleton(cls) -> None: + """Testing interface to reset the singleton to 'undefined'""" + cls._instance = None + + +class MockCallback: + """Mock notification callback for testing NTP.""" + + def __init__(self, context: str): + self.context = context + + def mock_callback(self, event: int, delay: int) -> None: + """Log which callback was called, and the triggering event details""" + event_num = event if isinstance(event, int) else event.value + if event_num == 0: + _logger.debug(MOCKED_CALLBACK_MSG % (self.context, event, delay)) + return + _logger.info(MOCKED_CALLBACK_MSG % (self.context, event, delay)) diff --git a/tests/mocks/mock_time.py b/tests/mocks/mock_time.py new file mode 100644 index 0000000..d7e40aa --- /dev/null +++ b/tests/mocks/mock_time.py @@ -0,0 +1,157 @@ +# SPDX-FileCopyrightText: 2024 H PHil Duby +# +# SPDX-License-Identifier: MIT + +""" +Mock code to simulate attributes of the time module for testing 
class MockTime:
    """Singleton class to hold the state of the mock time module."""

    _instance = None

    def __new__(cls, *args, **kwargs):
        """Enforce the singleton pattern; allocate only on the first call."""
        _logger.info(MOCKED_TIME_NEW_MSG)
        if cls._instance is None:
            _logger.info(MOCKED_TIME_FIRST_NEW_MSG)
            cls._instance = super(MockTime, cls).__new__(cls, *args, **kwargs)
        return cls._instance

    def __init__(self):
        """Additional (duplicate) instances should not change the internal singleton state."""
        if not hasattr(self, "_initialized"):
            # Do not start at zero. It confuses the app. Use a 'fake' boot time delay instead.
            self._monotonic_ns = MOCKED_TIME_DEFAULT_START_NS
            self._initialized = True

    def monotonic_ns(self) -> int:
        """Simulate time.monotonic_ns, incrementing mock nanoseconds with each call.

        The +1 per call makes every reading unique, which lets tests count how
        many times the code under test sampled the clock.
        """
        self._monotonic_ns += 1
        _logger.info(MOCKED_TIME_MONOTONIC_NS_MSG, fmt_thousands(self._monotonic_ns))
        return self._monotonic_ns

    def sleep(self, seconds: float) -> None:
        """Simulate time.sleep by advancing mock nanoseconds by the specified duration."""
        _logger.info(MOCKED_TIME_SLEEP_MSG, seconds)
        self._monotonic_ns += int(seconds * NS_PER_SEC)

    @staticmethod
    def localtime(seconds: Optional[float] = None) -> time.struct_time:
        """simulate time.localtime, but only when a seconds value is provided.

        This could be merged into the module level function. Keeping it in the instance
        to keep the functionality in the class, which allows future enhancements that might
        want to manipulate the result via the controlling instance. IE return a specific
        value instead of using the built in function.

        Raises the prebuilt MOCKED_TIME_NOT_LOCALTIME_EX when called with no argument.
        """
        _logger.info(MOCKED_TIME_LOCALTIME_MSG, seconds)
        if seconds is None:
            # not reasonable to get 'now' time in environment without a real time clock.
            raise MOCKED_TIME_NOT_LOCALTIME_EX
        return time.localtime(seconds)

    def get_mock_ns(self) -> int:
        """
        Get the current mock time in nanoseconds without advancing the clock.

        This bypasses the actual mocked methods to avoid the logging and increment,
        allowing direct access for testing purposes.
        """
        return self._monotonic_ns

    def set_mock_ns(self, time_ns: int) -> None:
        """
        Set the mock time in nanoseconds.

        This is useful in testing scenarios where manual control of the time flow is required.
        """
        self._monotonic_ns = time_ns

    @classmethod
    def get_mock_instance(cls) -> "MockTime":
        """Get the singleton instance of MockTime without going through instantiation."""
        if MockTime._instance is None:
            raise AttributeError(
                "No MockTime instance currently exists. Call MockTime() first."
            )
        return MockTime._instance

    @classmethod
    def clear_mock_singleton(cls) -> None:
        """Explicitly reset the singleton to 'undefined' for testing control.

        Do **NOT** call this directly from testing code. Use the module level function instead.
        """
        cls._instance = None


# Mocked module-level functions
# This module replaces `time` in sys.modules, so these names shadow the real
# time.monotonic_ns/sleep/localtime for everything imported afterwards.
_mock_time_instance = MockTime()


def monotonic_ns() -> int:
    """Module-level function to simulate time.monotonic_ns, using the singleton instance."""
    return _mock_time_instance.monotonic_ns()


def sleep(seconds: float) -> None:
    """Module-level function to simulate time.sleep, using the singleton instance."""
    _mock_time_instance.sleep(seconds)


def localtime(seconds: Optional[float] = None) -> time.struct_time:
    """Module-level function to simulate time.localtime, using the singleton instance."""
    return _mock_time_instance.localtime(seconds)


def get_mock_instance() -> MockTime:
    """Get the (global) singleton instance for MockTime."""
    return _mock_time_instance


def clear_mock_singleton() -> None:
    """Clear the singleton instance of MockTime for testing purposes, and create a new instance."""
    global _mock_time_instance  # pylint:disable=global-statement
    MockTime.clear_mock_singleton()
    _mock_time_instance = MockTime()
+ +This probably CAN be merged, by explicitly importing the specific time module attributes +before the override is done. +""" +# pylint:disable=too-many-lines + +from collections import namedtuple +from errno import ETIMEDOUT +import os +import sys +import unittest + +try: + from typing import Callable, Optional, Tuple, Dict, List, Set + from tests.shared_for_testing import LogTupleT +except ImportError: + pass +from tests.shared_for_testing import ( + ListHandler, + fmt_thousands, + mock_info, + get_context_exception, + DEFAULT_NTP_ADDRESS, + NS_PER_SEC, + MOCKED_CALLBACK_MSG, + MOCKED_TIME_DEFAULT_START_NS, + MOCKED_TIME_LOCALTIME_MSG, + MOCKED_TIME_MONOTONIC_NS_MSG, + MOCKED_TIME_SLEEP_MSG, + MOCKED_POOL_GETADDR_MSG, + MOCKED_POOL_SOCKET_MSG, + MOCKED_SOCK_NEW_MSG, + MOCKED_SOCK_INIT_MSG, + MOCKED_SOCK_ENTER_MSG, + MOCKED_SOCK_SETTIMEOUT_MSG, + MOCKED_SOCK_EXIT_MSG, + MOCKED_SOCK_SENDTO_MSG, + MOCKED_SOCK_RECV_INTO_MSG, + NTP_PACKET_SIZE, + NTP_SERVER_IPV4_ADDRESS, + ADDR_SOCK_KEY, + NTP_ADDRESS_PORT, + GETADDR_DNS_EX, + IP_SOCKET, +) +from tests.simulate_ntp_packet import create_ntp_packet, iso_to_nanoseconds +from tests.mocks.mock_pool import MockPool # , MockCallback, MockSocket +from tests.mocks import mock_time +from tests.mocks.mock_time import MockTime + +# Set/Replace the time module with the mock version. All requests (imports) of time for the +# rest of the session should get the mock. +sys.modules["time"] = mock_time +# Import the adafruit_ntp module components, which should import the mock versions of modules +# defined (imported) above, unless they are injected on instantiation. 
NTPSTATE_FIELDS = {
    "packet",
    "port",
    "server",
    "socket_address",
    "tz_offset_ns",
    "socket_timeout",
    "cache_ns",
    "monotonic_start_ns",
    "next_sync",
    "last_sync_time",
    "callbacks",
    "state",
    "blocking",
    "monotonic_ns",
    "limit_end",
    "limit_delay",
}


class NTPState:  # pylint:disable=too-many-instance-attributes
    """Storage for internal NTP instance state information.

    Holds a snapshot of the NTP instance's fields plus the state of the mocked
    time module, so tests can set, capture, and compare complete scenarios.
    """

    def __init__(
        self,
        *,  # pylint:disable=too-many-arguments,too-many-locals
        server: str = "",
        port: int = 123,
        socket_address: Optional[Tuple[str, int]] = None,
        packet: Optional[bytearray] = None,
        tz_offset_ns: int = 0,
        socket_timeout: int = 10,
        cache_ns: int = 0,
        monotonic_start_ns: int = 0,
        next_sync: int = 0,
        last_sync_time: int = 0,
        callbacks: Dict[Callable[[_IntFlag, int], None], _IntFlag] = None,
        state: int = 0,
        blocking: bool = True,
        monotonic_ns: int = 0,  # mocked functionality internal state information
        limit_end: int = 0,
        limit_delay: int = 0,
    ):
        # NTP instance internal state information
        self.server: str = server
        self.port: int = port
        self.socket_address: Optional[Tuple[str, int]] = socket_address
        # Copy whenever a buffer was supplied (even an empty one) so this state
        # never aliases the caller's bytearray; the previous truthiness test
        # silently shared a reference for zero-length input.
        self.packet: Optional[bytearray] = (
            bytearray(packet) if packet is not None else None
        )
        self.tz_offset_ns: int = tz_offset_ns
        self.socket_timeout: int = socket_timeout
        self.cache_ns: int = cache_ns
        self.monotonic_start_ns: int = monotonic_start_ns
        self.next_sync: int = next_sync
        self.last_sync_time: int = last_sync_time
        # Copy the mapping so later registration changes don't leak between states.
        self.callbacks: Dict[Callable[[_IntFlag, int], None], _IntFlag] = (
            {} if callbacks is None else dict(callbacks)
        )
        self.state: int = state
        self.blocking: bool = blocking
        # mocked functionality internal state information : time
        self.monotonic_ns: int = monotonic_ns
        self.limit_end: int = limit_end
        self.limit_delay: int = limit_delay
        # mocked functionality internal state information : socket
        # used and discarded. No state or socket reference kept.
        # mocked functionality internal state information : pool
        # no state information used.

    def copy(self) -> "NTPState":
        """Create a deep copy of the current instance."""
        # Create a new instance of the class
        duplicate_state = NTPState()
        # Copy the (data) attributes from the original instance
        simple_fields = NTPSTATE_FIELDS - {"packet", "callbacks"}
        for field in simple_fields:
            setattr(duplicate_state, field, getattr(self, field))
        # Need to create a copy of the packet and callbacks, not just get a shared reference
        duplicate_state.packet = None if self.packet is None else bytearray(self.packet)
        duplicate_state.callbacks.update(self.callbacks)

        return duplicate_state


DEFAULT_NTP_STATE = NTPState(
    server=DEFAULT_NTP_ADDRESS[0],
    port=DEFAULT_NTP_ADDRESS[1],
    socket_address=None,
    packet=bytearray((0,) * NTP_PACKET_SIZE),
    tz_offset_ns=0,
    socket_timeout=10,
    cache_ns=60 * NS_PER_SEC,
    monotonic_start_ns=0,
    next_sync=0,
    last_sync_time=0,
    callbacks={},
    state=NTP.USING_CACHED_REFERENCE,
    blocking=True,
    monotonic_ns=MOCKED_TIME_DEFAULT_START_NS,
    limit_end=None,
    limit_delay=None,
)
    def _set_and_verify_ntp_state(self, target_state: NTPState) -> NTPState:
        """Set the NTP instance to a known state, and verify that it got set.

        Uses the module-level set_ntp_state/get_ntp_state/match_expected_field_values
        helpers (defined elsewhere in this module) to push the state into self.ntp,
        read it back, and fail the test if any field did not take.

        :param NTPState target_state: The state to push into the NTP instance.
        :return: NTPState: The state actually read back from the instance.
        """
        set_ntp_state(target_state, self.ntp)
        actual_state = get_ntp_state(self.ntp)
        unmatched = match_expected_field_values(target_state, actual_state)
        self.assertEqual(
            unmatched, set(), f"NTP state fields {unmatched} do not match intended"
        )
        return actual_state
+ """ + expected_state = self._set_and_verify_ntp_state(start) + initial_time = self._prime_expected_dns_fail_state(expected_state) + for _ in range( + count + ): # Do enough iterations to get into the maximum rate limiting delay + self._dns_failure_cycle(expected_state, notify) + if not expected_state.blocking: + # need to advance time to get past each rate limiting delay + saved_ns = self.mock_time.get_mock_ns() + self.mock_time.set_mock_ns(expected_state.limit_end) + expected_state.monotonic_ns = expected_state.limit_end + if not expected_state.blocking: + # Undo the final time advance + self.mock_time.set_mock_ns(saved_ns) + expected_state.monotonic_ns = saved_ns + self._post_dns_failures_check(initial_time) + + def _configure_send_fail( + self, start: NTPState, notify: Optional[str] + ) -> Tuple[NTPState, List[LogTupleT], int]: + """ + Do the common configuration for an NTP send failure test. + + :param NTPState start: Base NTP state for the test scenario mode. + :param Optional[str] notify: Notification context for the test. + """ + # This is an unusual state for legacy mode. The only way that this should be possible + # (initially) is for the network connection to drop after the successful DNS lookup, + # but before sending the NTP packet. On following NTP cache timeouts, this can occur + # because the DNS cache can return a valid looking socket even though the connection + # is down. + start.state = self.NTP_STATE_MACHINE.GETTING_PACKET + start.socket_address = NTP_ADDRESS_PORT + start.limit_delay = self.DNS_BACKOFF.first + start.limit_end = 0 + + self.mock_socket.set_sendto_responses([SENDTO_BROKENPIPE_EX]) + self.mock_socket.mock_recv_into_attempts.clear() # Should not get far enough to need. 
+ send_packet: bytearray = create_ntp_packet(mode=3) # client + send_packet[1:] = bytearray(len(send_packet) - 1) # fill with zeros after mode + + expected_state = self._set_and_verify_ntp_state(start) + expected_state.state = self.NTP_STATE_MACHINE.GETTING_SOCKET + expected_state.packet[:] = send_packet + should_log: List[LogTupleT] = [] + next_ns = logged_for_time_request(should_log) + next_ns = logged_for_send_fail(should_log, next_ns, expected_state, notify) + expected_state.monotonic_ns = next_ns - 1 + expected_state.limit_end = ( + expected_state.monotonic_ns + expected_state.limit_delay + ) + expected_state.limit_delay = min( + self.DNS_BACKOFF.maximum, + int(expected_state.limit_delay * self.DNS_BACKOFF.factor), + ) + return expected_state, should_log, next_ns + + def _configure_send_retry_fail( + self, expected_state: NTPState, notify: Optional[str] + ) -> Tuple[List[LogTupleT]]: + """ + Do the common configuration for an NTP send retry failure test. + + :param NTPState expected_state: NTP state from the previous attempt for the test + scenario mode. + :param Optional[str] notify: Notification context for the test. 
+ """ + self.mock_pool.set_getaddrinfo_responses([IP_SOCKET]) + self.mock_socket.set_sendto_responses([SENDTO_BROKENPIPE_EX]) + should_log: List[LogTupleT] = [] + next_ns = logged_for_time_request(should_log) + save_ns = next_ns # DEBUG + next_ns = logged_for_get_address( + should_log, next_ns, expected_state, expected_state.limit_end, notify + ) + print(f"retry fail: {save_ns =}, {next_ns =}") + next_ns = logged_for_send_fail(should_log, next_ns, expected_state, notify) + expected_state.monotonic_ns = next_ns - 1 + expected_state.limit_end = ( + expected_state.monotonic_ns + expected_state.limit_delay + ) + expected_state.limit_delay = min( + self.DNS_BACKOFF.maximum, + int(expected_state.limit_delay * self.DNS_BACKOFF.factor), + ) + return should_log, next_ns + + def _configure_ntp_fail( + self, start: NTPState, notify: Optional[str] + ) -> Tuple[NTPState, List[LogTupleT], int]: + """ + Do the common configuration for an NTP receive failure test. + + :param NTPState start: Base NTP state for the test scenario mode. + :param Optional[str] notify: Notification context for the test. + """ + # This is an unusual state for legacy mode. Only way to get here seems to be to start + # in non-blocking mode, then change to blocking after the DNS lookup is finished. 
+ start.state = self.NTP_STATE_MACHINE.GETTING_PACKET + start.socket_address = NTP_ADDRESS_PORT + start.limit_delay = self.DNS_BACKOFF.first # Shouldn't really matter + start.limit_end = 0 # Shouldn't really matter + + self.mock_socket.set_sendto_responses([NTP_PACKET_SIZE]) + self.mock_socket.set_recv_into_responses([RECV_INTO_TIMEDOUT_EX]) + send_packet: bytearray = create_ntp_packet(mode=3) # client + send_packet[1:] = bytearray(len(send_packet) - 1) # fill with zeros after mode + + expected_state = self._set_and_verify_ntp_state(start) + expected_state.state = self.NTP_STATE_MACHINE.USING_CACHED + expected_state.packet[:] = send_packet + should_log: List[LogTupleT] = [] + next_ns = logged_for_time_request(should_log) + expected_state.monotonic_ns = next_ns + 1 + expected_state.next_sync = expected_state.cache_ns + expected_state.monotonic_ns + next_ns = logged_for_ntp_packet(should_log, next_ns, expected_state, notify) + return expected_state, should_log, next_ns + + def _configure_good_ntp( + self, start: NTPState, notify: Optional[str] + ) -> Tuple[NTPState, List[LogTupleT]]: + """ + Do the common configuration for a successful NTP test. + + - Adjust the starting state to match what is needed to have the NTP instance attempt + to get an NTP packet from the server. That means being in the correct state, and + having a good socket. + - Set the mock so the request will get a good (test) ntp packet. + - Populate what the instance state and log should contain after the request. + + :param NTPState start: Base NTP state for the test scenario mode. + :param Optional[str] notify: Notification context for the test. + """ + # This is an unusual state for blocking mode. The only way to get here seems to be to + # start in non-blocking mode, then change to blocking after the DNS lookup is finished. 
+ start.state = self.NTP_STATE_MACHINE.GETTING_PACKET + start.socket_address = NTP_ADDRESS_PORT + start.limit_delay = self.DNS_BACKOFF.first # Shouldn't really matter + start.limit_end = 0 # Shouldn't really matter + + ntp_base_iso = "2024-01-01T10:11:12.987654321" + ntp_receive_delta, ntp_transmit_delta, good_receive_packet = ( + create_ntp_packet_for_iso(ntp_base_iso) + ) + # Queue a good NTP packet for the mock to supply when requested. + self.mock_socket.set_sendto_responses([NTP_PACKET_SIZE]) + self.mock_socket.set_recv_into_responses([good_receive_packet]) + # Push the adjusted starting state information to the test instance: base expected state + expected_state = self._set_and_verify_ntp_state(start) + expected_state.state = self.NTP_STATE_MACHINE.USING_CACHED + expected_state.packet[:] = good_receive_packet + should_log: List[LogTupleT] = [] + next_ns = logged_for_time_request(should_log) + expected_state.monotonic_ns = next_ns + 2 + expected_state.last_sync_time = expected_state.monotonic_ns - 1 + expected_state.next_sync = ( + expected_state.cache_ns + expected_state.last_sync_time + ) + expected_state.monotonic_start_ns = ( + iso_to_nanoseconds(ntp_base_iso) + + ntp_receive_delta + + ntp_transmit_delta // 2 + - expected_state.last_sync_time + - 1 + ) + next_ns = logged_for_ntp_packet(should_log, next_ns, expected_state, notify) + logged_for_time_reference(should_log, next_ns, False) + return expected_state, should_log + + def _configure_cached_ntp(self, iso_time: str) -> NTPState: + """ + Create an NTP state configuration for an already synchronized scenario. + + :param str iso_time: The time to use for the cached synchronization offset. + :return: NTPState: The configured NTP state. 
+ """ + start = DEFAULT_NTP_STATE.copy() + start.monotonic_start_ns = iso_to_nanoseconds(iso_time) + start.state = self.NTP_STATE_MACHINE.USING_CACHED + + start.last_sync_time = NS_PER_SEC + 1000 + ntp_receive_delta, ntp_transmit_delta, good_receive_packet = ( + create_ntp_packet_for_iso(iso_time) + ) + start.packet[:] = good_receive_packet + start.next_sync = start.cache_ns + start.last_sync_time + start.monotonic_start_ns = ( + iso_to_nanoseconds(iso_time) + + ntp_receive_delta + + ntp_transmit_delta // 2 + - start.last_sync_time + - 1 + ) + return start + + def _cached_ntp_operation_and_check_results(self, start: NTPState) -> None: + """ + Check the result when should be using cached NTP synchronization offset. + + :param NTPState start: The initial NTP state. + :return: None + """ + expected_state = self._set_and_verify_ntp_state(start) + self.mock_time.set_mock_ns(expected_state.last_sync_time) + # self.mock_time.set_mock_ns(expected_state.next_sync - 2) # upto .next_sync - 2 + should_log: List[LogTupleT] = [] + next_ns = logged_for_time_request(should_log) + logged_for_time_reference(should_log, next_ns, False) + expected_state.monotonic_ns = next_ns + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + def _good_ntp_operation_and_check_result( + self, expected_state: NTPState, expected_log: Tuple[LogTupleT] + ) -> None: + """ + Perform an NTP time request (successful) operation for the configured state and check the + result. + + :param NTPState expected: Expected NTP state after the operation attempt. + :param Tuple[LogTupleT] expected_log: Expected log records from the operation. 
    def _fail_ntp_operation_and_check_result(
        self,
        exception: Exception,
        expected_state: NTPState,
        expected_log: Tuple[LogTupleT],
    ) -> None:
        """
        Perform an NTP time request (failing) operation for the configured state and check the
        result.

        :param Exception exception: Specific exception that is expected.
        :param NTPState expected_state: Expected NTP state after the operation attempt.
        :param Tuple[LogTupleT] expected_log: Expected log records from the operation.
        :return: None
        """
        with self.assertRaises(type(exception)) as context:
            time_ns = self.ntp.utc_ns
            # Only reached if utc_ns did NOT raise: fail with the unexpected value.
            raise AssertionError(
                f"Should have raised {type(exception)}, got {fmt_thousands(time_ns)}"
            )
        exc_data = get_context_exception(context)
        # Compare via repr so both the exception type and its arguments must match.
        self.assertEqual(repr(exc_data), repr(exception))
        self._verify_expected_operation_state_and_log(expected_state, expected_log)
+ """ + expected_state = self._set_and_verify_ntp_state(start) + self.mock_time.set_mock_ns(start_time_point) + should_log: List[LogTupleT] = [] + expected_state.monotonic_ns = start_time_point + 3 + expected_state.limit_end = ( + expected_state.monotonic_ns + expected_state.limit_delay + ) + expected_state.limit_delay = int( + expected_state.limit_delay * self.DNS_BACKOFF.factor + ) + next_ns = logged_for_time_request(should_log) + logged_for_get_address( + should_log, next_ns, expected_state, start.limit_end, None + ) + self._bound_check_result_verification(expected_state, should_log) + + def _prime_expected_dns_fail_state(self, expected_state: NTPState) -> int: + """ + Initialize the expected NTP state to match a dummy previous DNS fail cycle. + + This sets the expected state to values that the initial _dns_failure_state 'step' update + will advance to the correct starting expected state for the first cycle of the DNS + failure case tests. + + :param NTPState expected: Base expected NTP state for the test scenario. + """ + expected_state.monotonic_ns = self.mock_time.get_mock_ns() + expected_state.state = self.NTP_STATE_MACHINE.GETTING_SOCKET + expected_state.limit_delay = self.DNS_BACKOFF.first + expected_state.limit_end = expected_state.monotonic_ns + 2 + return expected_state.monotonic_ns + + def _dns_failure_state(self, expected_state: NTPState) -> None: + """ + Update expected ntp instance state for next dns lookup and failure. + + :param NTPState expected: The ntp instance state before attempt the dns lookup. 
+ """ + # additional (non-state) test case scenario setup + self._log_handler.log_records.clear() + # expected ntp instance state changes + sleep_ns = 0 + if expected_state.blocking: + sleep_s = ( + expected_state.limit_end - expected_state.monotonic_ns - 2 + ) / NS_PER_SEC + sleep_ns = int(sleep_s * NS_PER_SEC) + if ( + expected_state.blocking + or expected_state.monotonic_ns + 3 > expected_state.limit_end + ): + # actual dns lookup attempt will be made + expected_state.monotonic_ns = expected_state.monotonic_ns + 3 + sleep_ns + expected_state.limit_end = ( + expected_state.monotonic_ns + expected_state.limit_delay + ) + expected_state.limit_delay = min( + self.DNS_BACKOFF.maximum, + int(expected_state.limit_delay * self.DNS_BACKOFF.factor), + ) + else: # early incomplete exit during rate limiting wait + # limit values do not change in the middle of a rate limiting delay + expected_state.monotonic_ns = expected_state.monotonic_ns + 2 + + def _dns_failure_cycle( + self, expected_state: NTPState, notify: Optional[str] + ) -> None: + """ + Set up and execute next dns lookup and failure. + + Input expected_state is from the previous cycle, so is the starting state for the + next cycle. Updated in place instead of duplicated. + + :param NTPState expected_state: The ntp instance state before the dns lookup attempt. + :param Optional[str] notify: The name of the notification context being used. 
+ """ + previous_end = ( + 0 + if expected_state.limit_delay == self.DNS_BACKOFF.first + else expected_state.limit_end + ) + self._dns_failure_state(expected_state) + should_log: List[LogTupleT] = [] + next_ns = logged_for_time_request(should_log) + logged_for_get_address( + should_log, next_ns, expected_state, previous_end, notify + ) + expected_log = tuple(should_log) + exception = GETADDR_DNS_EX if expected_state.blocking else INCOMPLETE_EX + self._fail_ntp_operation_and_check_result( + exception, expected_state, expected_log + ) + + def _post_dns_failures_check(self, cycle_start: int) -> None: + """ + Verification done after a sequence of dns lookup attempts that all fail. + + Test conditions are dependent on the number of cycles (attempts), and on the rate limiting + backoff configuration (self.DNS_BACKOFF) + + :param int cycle_start: The mock monotonic nanoseconds time point that the sequence is + started. + """ + # pylint:disable=protected-access + self.assertEqual( + self.ntp._next_rate_limit_delay, + self.DNS_BACKOFF.maximum, + "Should have reached the maximum retry delay of " + f"{self.DNS_BACKOFF.maximum}: currently at {self.ntp._next_rate_limit_delay}", + ) + minimum_elapsed = 200 # seconds + min_ns = NS_PER_SEC * minimum_elapsed + cycle_start + now = self.mock_time.get_mock_ns() + self.assertTrue( + now > min_ns, + "Should have used at least " + f"{minimum_elapsed} seconds: elapsed {now - cycle_start:,} nanoseconds", + ) + + def _expected_for_any_get_dns( + self, start: NTPState, notify: Optional[str] + ) -> Tuple[NTPState, Tuple[LogTupleT]]: + """ + Common expected state and log configuration for successful DNS lookup. + + When in blocking more, a successful DNS lookup continues to do a NTP packet request. That + means the expected result and logging have additional changes. + + :param NTPState start: The initial NTP state. 
+ :param Optional[str] notify: The notification context or None + :returns Tuple[NTPState, int, List[LogTupleT]]: The calculated state, ending nanosecond, + and the expected log records. + """ + self.mock_pool.set_getaddrinfo_responses( + [ + IP_SOCKET, + ] + ) + expected_state = self._set_and_verify_ntp_state(start) + expected_state.socket_address = NTP_ADDRESS_PORT + expected_state.limit_delay = self.DNS_BACKOFF.first + expected_state.limit_end = 0 + if start.blocking: + # Attempts to get NTP packet right after getting DNS + self.mock_socket.set_sendto_responses([NTP_PACKET_SIZE]) + self.mock_socket.set_recv_into_responses([RECV_INTO_TIMEDOUT_EX]) + send_packet: bytearray = create_ntp_packet(mode=3) # client + send_packet[1:] = bytearray( + len(send_packet) - 1 + ) # fill with zeros after mode + expected_state.packet[:] = send_packet + + should_log: List[LogTupleT] = [] + next_ns = logged_for_time_request(should_log) + start_end = 0 if start.limit_end is None else start.limit_end + next_ns = logged_for_get_address( + should_log, next_ns, expected_state, start_end, notify + ) + if start.blocking: + expected_state.next_sync = expected_state.cache_ns + next_ns + 1 + expected_state.state = self.NTP_STATE_MACHINE.USING_CACHED # no change + next_ns = logged_for_ntp_packet(should_log, next_ns, expected_state, notify) + else: + expected_state.state = self.NTP_STATE_MACHINE.GETTING_PACKET + + if start.monotonic_start_ns > 0: + # When have previous cached offset, use it + next_ns = logged_for_time_reference(should_log, next_ns, False) + expected_state.monotonic_ns = next_ns - 1 + expected_log = tuple(should_log) + return expected_state, expected_log + + def _fallback_dns_fail(self, start: NTPState, notify: Optional[str]) -> None: + """ + Common testing for failing DNS lookup attempts when a cached offset already exists. + + :param NTPState start: The initial NTP state. 
+ :param Optional[str] notify: The notification context or None + :return: None + """ + start.monotonic_ns = start.next_sync # Earliest time that cache will be expired + expected_state = self._set_and_verify_ntp_state(start) + expected_state.state = self.NTP_STATE_MACHINE.GETTING_SOCKET + previous_delay = ( + self.DNS_BACKOFF.first if start.limit_delay is None else start.limit_delay + ) + expected_state.limit_delay = int(previous_delay * self.DNS_BACKOFF.factor) + should_log: List[LogTupleT] = [] + next_ns = logged_for_time_request(should_log) + expected_state.limit_end = self.DNS_BACKOFF.first + next_ns + 1 + start_end = 0 if start.limit_end is None else start.limit_end + next_ns = logged_for_get_address( + should_log, next_ns, expected_state, start_end, notify + ) + + logged_for_time_reference(should_log, next_ns, False) + expected_state.monotonic_ns = next_ns + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + +def get_ntp_state(ntp: NTP) -> NTPState: + """ + Extract the current state of the given NTP instance into an NTPState dataclass. + + The NTP instance state includes state from the (mocked) instance it uses. + + :param ntp: NTP: The NTP instance whose state is to be extracted. + :return: NTPState: A dataclass instance representing the current state of the NTP instance. 
+ """ + # pylint:disable=protected-access + limit_delay = getattr(ntp, "_next_rate_limit_delay", None) + limit_end = getattr(ntp, "_next_rate_limit_end", None) + return NTPState( + server=ntp._server, + port=ntp._port, + socket_address=ntp._socket_address, + packet=bytearray(ntp._packet), + tz_offset_ns=ntp._tz_offset_ns, + socket_timeout=ntp._socket_timeout, + cache_ns=ntp._cache_ns, + monotonic_start_ns=ntp._monotonic_start_ns, + next_sync=ntp._next_sync, + last_sync_time=ntp._last_sync_time, + callbacks=dict(ntp._callbacks), + state=ntp._state, + blocking=ntp._blocking, + limit_delay=limit_delay, + limit_end=limit_end, + monotonic_ns=MockTime.get_mock_instance().get_mock_ns(), + ) + + +def set_ntp_state(state: NTPState, ntp: NTP) -> None: + """ + Set the state of the given NTP instance from an NTPState dataclass. + + The NTP instance state includes state from the (mocked) instance it uses. + + :param state: NTPState: A dataclass instance representing the state to be restored to the NTP + instance. + :param ntp: NTP: The NTP instance whose state is to be set. + :return: None + """ + # pylint:disable=protected-access + ntp._server = state.server + ntp._port = state.port + ntp._socket_address = state.socket_address + ntp._packet[:] = state.packet + ntp._tz_offset_ns = state.tz_offset_ns + ntp._socket_timeout = state.socket_timeout + ntp._cache_ns = state.cache_ns + ntp._monotonic_start_ns = state.monotonic_start_ns + ntp._next_sync = state.next_sync + ntp._last_sync_time = state.last_sync_time + ntp._callbacks.clear() + ntp._callbacks.update(state.callbacks) + ntp._state = state.state + ntp._blocking = state.blocking + ntp._next_rate_limit_delay = state.limit_delay + ntp._next_rate_limit_end = state.limit_end + MockTime.get_mock_instance().set_mock_ns(state.monotonic_ns) + + +def create_ntp_packet_for_iso(iso_time: str) -> Tuple[int, int, bytearray]: + """ + Create an NTP packet as received from an NTP server. 
+ + :param str iso_time: The UTC time to use as a reference point on the server. + :returns Tuple[int, int, bytearray]: + Offset from iso_time in nanoseconds used for the received (from client) timestamp. + Offset from iso_time in nanoseconds used for the transmitted (from server) timestamp. + NTP packet + """ + ntp_receive_delta = ( + 10_012_345_678 # 10 seconds + adjust to get all 9's ns from 987654321 + ) + ntp_transmit_delta = 500_000_002 # just over half a second + good_receive_packet = create_ntp_packet( + iso=iso_time, + leap=0, + mode=4, # Server + stratum=3, + poll=7, + precision=-9, + root_delay=1_543_210_987, + root_dispersion=567_432, + ipv4=NTP_SERVER_IPV4_ADDRESS, + ref_delta=0, # keep at zero, to calculations using delta values work + receive_delta=ntp_receive_delta, + transmit_delay=ntp_transmit_delta, + ) + return ntp_receive_delta, ntp_transmit_delta, good_receive_packet + + +def _changed_state_fields( + state1: NTPState, state2: NTPState, fields: Optional[Set[str]] = None +) -> Set[str]: + """ + Compare two NTPState instances and return the names of the attributes that are different. + + :param state1: NTPState: The first NTPState instance. + :param state2: NTPState: The second NTPState instance. + :param fields: Optional[Set[str]]: A set of attribute names to check for changes. If None, + compare all attributes. + :return: Set[str]: A set of attribute names that have different values between the two states. + :raises AttributeError: If a specified field name does not exist in NTPState. 
+ """ + if fields is not None: + # Ensure that all provided field names are valid attributes of NTPState + invalid_fields = fields - NTPSTATE_FIELDS + if invalid_fields: + raise AttributeError( + f"Invalid NTPState field names: {', '.join(invalid_fields)}" + ) + + fields = NTPSTATE_FIELDS if fields is None else fields + callbacks_field = {"callbacks"} + changed_fields = set() + + for field in fields - callbacks_field: + if getattr(state1, field) != getattr(state2, field): + changed_fields.add(field) + + for field in fields & callbacks_field: + # Need to handle the callbacks dict separately. The way that python generates id values + # for (at least) callable objects means that a direct compare of dictionaries fails. The + # 'duplicate' Callable references are not equal, even though they refer to the same object + # in memory. + # All attempts to find a data structure and comparison code to match expected and actual + # state dictionary callable instance have failed. An expected entry created from exactly + # the same method as was registered still compares as not equal. The best can apparently + # be done, is to make sure that the registered event masks are the same. + callback1 = getattr(state1, field) + callback2 = getattr(state2, field) + # Forcing all values to IntFlag instances seems easier than extracting the .value when the + # dict value is an IntFlag. Need to get them to be consistent for the sorting (on + # circuitpython) to work. Otherwise can get. + # TypeError: unsupported types for __lt__: 'int', 'IntFlag' + # Either cpython automatically reverse the comparison, or it happens to only compare IntFlag + # to int, and not int to IntFlag. 
+ if sorted([_IntFlag(v) for v in callback1.values()]) != sorted( + [_IntFlag(v) for v in callback2.values()] + ): + changed_fields.add(field) + + return changed_fields + + +def match_expected_field_values( + expected_state: NTPState, actual_state: NTPState, fields: Optional[Set[str]] = None +) -> Set[str]: + """ + Compare all or selected fields of state information + + :param expected_state: NTPState: The expected NTP instance state. + :param actual_state: NTPState: The actual NTP instance state. + :param fields: Optional[Set[str]]: A set of attribute names to check for changes. If None, + compare all attributes. + :return: Set[str]: A set of attribute names that have different values between the two states. + :raises AttributeError: If a specified field name does not exist in NTPState. + """ + dict_fields = {"callbacks"} + unmatched = _changed_state_fields(expected_state, actual_state, fields) + for field in unmatched: + if field in dict_fields: + expected_dict = getattr(expected_state, field) + state_dict = getattr(actual_state, field) + # keys could be different, but values should be the same, though order could change + print(f"NTP state.{field} expected:") + for key, value in expected_dict.items(): + print(f' "{str(key)}": {value})') + print(f"NTP state.{field} actual:") + for key, value in state_dict.items(): + print(f' "{str(key)}": {value})') + continue + print( + f'NTP state.{field} expected "{getattr(expected_state, field)}", ' + + f'found "{getattr(actual_state, field)}"' + ) + return unmatched + + +def verify_generic_expected_state_and_log( # pylint:disable=invalid-name + case: BaseNTPTest, + expected_state: NTPState, # pylint:disable=invalid-name + expected_log: Tuple[LogTupleT], + unmatched_message: str, + log_message: str, +) -> None: + """ + Common state and log verification for NTP tests. + + :param NTPState expected_state: The expected NTP state after the operation attempt. 
+ :param Tuple[LogTupleT] expected_log: Expected log records from the operation. + :param str unmatched_message: Message format for unmatched state fields. + :param str log_message: Message format for expected and actual log records. + :return: None + """ + unmatched = match_expected_field_values(expected_state, get_ntp_state(case.ntp)) + case.assertEqual(unmatched, set(), unmatched_message % unmatched) + + actual_log = case._log_handler.to_tuple() # pylint:disable=protected-access + log_args = (expected_log, actual_log) if expected_log else (actual_log,) + case.assertEqual(actual_log, expected_log, log_message % log_args) + + +# Some helper methods to build a tuple of expected log records generated by (NTP instance) +# calls to the various mocked methods. This will be compared with what is collected by +# the logger (mogger). The helpers append expected log records to a supplied list, so the +# final step will be to convert the list to a tuple to compare with _log_handler.to_tuple(). +# Populate any needed mock configuration response buffers before generating the expected +# log records. Some of the helper methods access the buffers to decide which log records +# should be expected. + + +def logged_for_time_request(log_records: List[LogTupleT]) -> int: + """ + Create the log records expected when requesting a time reference. + + Requesting the current time with the utc_ns or datetime property always starts with a + single request to «mock» monotonic_ns(). From there, multiple conditional branches + can occur. After that, if an offset exists, another monotonic_ns call is done to get + the actual time reference to use. Adding the log records for that is handled separately. + + :param List[LogTupleT] log_records: The list to append the log records to. + :param int time_reference: The time reference value to log. 
+ :return: The incremented time reference, which is the next mock time reference + """ + start_ns = MockTime.get_mock_instance().get_mock_ns() + 1 + log_records.append(mock_info(MOCKED_TIME_MONOTONIC_NS_MSG, fmt_thousands(start_ns))) + return start_ns + 1 + + +def logged_for_time_reference( + log_records: List[LogTupleT], time_reference: int, localtime: bool +) -> int: + """ + Create the log records expected when getting a time reference. + + This is the log records expected when an offset exists after whatever branching logic + is used. This path is followed when the offset is known, whether the cached offset + has expired or not. + + :param List[LogTupleT] log_records: The list to append the log records to. + :param int time_reference: The time reference value to log. + :param bool localtime: True when the local time is being requested. + :return: The incremented time reference, which is the next mock time reference + """ + log_records.append( + mock_info(MOCKED_TIME_MONOTONIC_NS_MSG, fmt_thousands(time_reference)) + ) + if localtime: + log_records.append( + mock_info(MOCKED_TIME_LOCALTIME_MSG, time_reference // NS_PER_SEC) + ) + return time_reference + 1 + + +def logged_for_get_address( + log_records: List[LogTupleT], + time_reference: int, + expected_state: NTPState, + wait_end: int, + notify: Optional[str], +) -> int: + """ + Create the log records expected on an execution path that includes a get socket operation. + + This does not include a get socket request that is skipped during a non-blocking rate + limiting delay. + + REMINDER: Load any needed mock_getaddrinfo_attempts before generating the expected log + records, and execute the test code afterwards + + :param List[LogTupleT] log_records: The list to append the log records to. + :param NTPState expected: The ntp instance state expected after the get socket operation. + :param int time_reference: The time reference at the start of the get socket request. 
+ :param int wait_end: The time when the rate limiting delay ends.
+ :param Optional[str] notify: The context used when a notification is being generated.
+ :return: The incremented time reference, which is the next mock time reference
+ """
+ log_records.append(
+ mock_info(MOCKED_TIME_MONOTONIC_NS_MSG, fmt_thousands(time_reference))
+ )
+ sleep_ns = 0
+ if expected_state.blocking and wait_end > time_reference:
+ # blocking mode sleeps until the end of the configured delay period.
+ sleep_seconds = (wait_end - time_reference) / NS_PER_SEC
+ sleep_ns = int(sleep_seconds * NS_PER_SEC)
+ log_records.append(mock_info(MOCKED_TIME_SLEEP_MSG, sleep_seconds))
+ log_records.append(
+ mock_info(
+ MOCKED_POOL_GETADDR_MSG, (expected_state.server, expected_state.port)
+ ),
+ )
+ if not isinstance(
+ MockPool.get_mock_instance().mock_getaddrinfo_attempts[0], Exception
+ ):
+ return time_reference + sleep_ns + 1
+ # If the attempt will fail, a new monotonic_ns call is made to adjust the rate limiting.
+ log_records.append(
+ mock_info(
+ MOCKED_TIME_MONOTONIC_NS_MSG, fmt_thousands(time_reference + sleep_ns + 1)
+ )
+ )
+ if notify:
+ log_records.append(
+ mock_info(
+ MOCKED_CALLBACK_MSG
+ % (notify, EventType.LOOKUP_FAILED, expected_state.limit_end)
+ )
+ )
+ return time_reference + sleep_ns + 2
+
+
+def logged_for_rate_limit_skip(
+ log_records: List[LogTupleT], time_reference: int
+) -> int:
+ """
+ Create the log records expected when processing is skipped due to rate limiting.
+
+ :param List[LogTupleT] log_records: The list to append the log records to.
+ :param int time_reference: The time reference value to log.
+ :return: The incremented time reference, which is the next mock time reference + """ + log_records.append( + mock_info(MOCKED_TIME_MONOTONIC_NS_MSG, fmt_thousands(time_reference)) + ) + return time_reference + 1 + + +def logged_for_send_fail( + log_records: List[LogTupleT], + time_reference: int, + expected_state: NTPState, + notify: Optional[str], +) -> int: + """ + Create the log records expected when failing to send an NTP packet. + + :param List[LogTupleT] log_records: The list to append the log records to. + :param NTPState expected_state: The ntp instance state expected after the get socket operation. + Make sure referenced fields are populated before calling: + - socket_timeout + - next_sync + :param int time_reference: The time reference at the start of the get socket request. + :param Optional[str] notify: The context used when a notification is being generated. + :return: The incremented time reference, which is the next mock time reference + """ + # The following set of log records are always expected when attempting to send an NTP packet. 
+ log_records.extend(
+ [
+ mock_info(MOCKED_POOL_SOCKET_MSG % ADDR_SOCK_KEY),
+ mock_info(MOCKED_SOCK_NEW_MSG % (ADDR_SOCK_KEY,)),
+ mock_info(MOCKED_SOCK_INIT_MSG % (ADDR_SOCK_KEY,)),
+ mock_info(MOCKED_SOCK_ENTER_MSG % (ADDR_SOCK_KEY,)),
+ mock_info(
+ MOCKED_SOCK_SETTIMEOUT_MSG,
+ (expected_state.socket_timeout, ADDR_SOCK_KEY),
+ ),
+ mock_info(MOCKED_TIME_MONOTONIC_NS_MSG, fmt_thousands(time_reference)),
+ mock_info(MOCKED_SOCK_SENDTO_MSG, (NTP_ADDRESS_PORT, ADDR_SOCK_KEY)),
+ mock_info(MOCKED_TIME_MONOTONIC_NS_MSG, fmt_thousands(time_reference + 1)),
+ ]
+ )
+ if notify:
+ log_records.append(
+ mock_info(
+ MOCKED_CALLBACK_MSG
+ % (
+ notify,
+ EventType.LOOKUP_FAILED,
+ time_reference + 1 + expected_state.limit_delay,
+ )
+ )
+ )
+ log_records.append(
+ mock_info(MOCKED_SOCK_EXIT_MSG % (ADDR_SOCK_KEY,))
+ ) # Always, when context ends
+ return time_reference + 2
+
+
+def logged_for_ntp_packet(
+ log_records: List[LogTupleT],
+ time_reference: int,
+ expected_state: NTPState,
+ notify: Optional[str],
+) -> int:
+ """
+ Create the log records expected when successfully sending and attempting to get an NTP packet.
+
+ :param List[LogTupleT] log_records: The list to append the log records to.
+ :param NTPState expected_state: The ntp instance state expected after the get socket operation.
+ Make sure referenced fields are populated before calling:
+ - socket_timeout
+ - next_sync
+ :param int time_reference: The time reference at the start of the get socket request.
+ :param Optional[str] notify: The context used when a notification is being generated.
+ :return: The incremented time reference, which is the next mock time reference
+ """
+ # The following set of log records are always expected when attempting to get an NTP packet.
+ log_records.extend( + [ + mock_info(MOCKED_POOL_SOCKET_MSG % ADDR_SOCK_KEY), + mock_info(MOCKED_SOCK_NEW_MSG % (ADDR_SOCK_KEY,)), + mock_info(MOCKED_SOCK_INIT_MSG % (ADDR_SOCK_KEY,)), + mock_info(MOCKED_SOCK_ENTER_MSG % (ADDR_SOCK_KEY,)), + mock_info( + MOCKED_SOCK_SETTIMEOUT_MSG, + (expected_state.socket_timeout, ADDR_SOCK_KEY), + ), + mock_info(MOCKED_TIME_MONOTONIC_NS_MSG, fmt_thousands(time_reference)), + mock_info(MOCKED_SOCK_SENDTO_MSG, (NTP_ADDRESS_PORT, ADDR_SOCK_KEY)), + mock_info(MOCKED_SOCK_RECV_INTO_MSG % (ADDR_SOCK_KEY,)), + mock_info(MOCKED_TIME_MONOTONIC_NS_MSG, fmt_thousands(time_reference + 1)), + ] + ) + if notify and notify[0:5] == "fail ": + # If notification is on, and receiving the return packet fails, then a log for the + # notification is generated. + log_records.append( + mock_info( + MOCKED_CALLBACK_MSG + % (notify, EventType.SYNC_FAILED, expected_state.next_sync) + ) + ) + log_records.append( + mock_info(MOCKED_SOCK_EXIT_MSG % (ADDR_SOCK_KEY,)) + ) # Always, when context ends + if notify and notify[0:5] == "good ": + # If notification is on, and receiving the return packet succeeded, then a log + # for the notification is generated. + log_records.append( + mock_info( + MOCKED_CALLBACK_MSG + % (notify, EventType.SYNC_COMPLETE, expected_state.next_sync) + ) + ) + return time_reference + 2 diff --git a/tests/shared_for_testing.py b/tests/shared_for_testing.py new file mode 100644 index 0000000..b2d5fb6 --- /dev/null +++ b/tests/shared_for_testing.py @@ -0,0 +1,283 @@ +# SPDX-FileCopyrightText: 2024 H PHil Duby +# +# SPDX-License-Identifier: MIT + +""" +Constants and shared code to use for adafruit_ntp unittesting. + +Enhance consistency by grouping simple values used by more than one unittest (module). +Don't repeat constant value literals to avoid things getting out of sync. 
+""" + +import os +import sys +import time + +try: + from typing import Tuple, Union, Any, List + + LogTupleT = Tuple[str, str, int, str, Tuple[Any]] + ArgsTupleT = Union[Tuple[Any, ...], Any] # pylint:disable=invalid-name +except ImportError: + pass +try: + # CPython + import socket as SocketPool + from unittest.case import _AssertRaisesContext as RaisesContext +except ImportError: + # CircuitPython + from socketpool import SocketPool # type:ignore + from unittest import AssertRaisesContext as RaisesContext +import adafruit_logging as logging + +MOCK_LOGGER = "mocking_logger" +NS_PER_SEC = 1_000_000_000 # nanoseconds per second + +NTP_PORT = 123 +DEFAULT_NTP_ADDRESS = ("0.adafruit.pool.ntp.org", NTP_PORT) +NTP_PACKET_SIZE = 48 +NTP_SERVER_IPV4_ADDRESS = "10.20.30.40" +NTP_ADDRESS_PORT = (NTP_SERVER_IPV4_ADDRESS, NTP_PORT) +ADDR_SOCK_KEY: Tuple[int, int, int] = (2, 2, 0) # AF_INET, SOCK_DGRAM +IP_SOCKET: List[Tuple[int, int, int, str, Tuple[str, int]]] = [ + (2, 1, 0, "", NTP_ADDRESS_PORT) +] + +# Messages and exceptions to mock + +# For «pool».getaddressinfo(…) +GETADDR_DNS_MSG: Tuple[int, str] = (-2, "Name or service not known") +GAI_EXCEPTION = ( + SocketPool.gaierror +) # will this vary depending on the connection source used? +GETADDR_DNS_EX = GAI_EXCEPTION(GETADDR_DNS_MSG) + +# For NTP +BAD_EVENT_MASK_MSG: str = ( + "Invalid event type mask 0b%s. Only known events can receive notifications." +) + +# Constants used and messages generated by mocking code. 
+ +# For mock_time.MockTime +MOCKED_TIME_DEFAULT_START_NS: int = 1_000_000_000 +MOCKED_TIME_NEW_MSG: str = "mock time.__new__ called" +MOCKED_TIME_FIRST_NEW_MSG: str = "mock time.__new__ new instance" +MOCKED_TIME_MONOTONIC_NS_MSG: str = "mock time.monotonic_ns %s" +MOCKED_TIME_SLEEP_MSG: str = "mock time.sleep(%f)" +MOCKED_TIME_LOCALTIME_MSG: str = "mock time.localtime(%s)" +MOCKED_TIME_NOT_LOCALTIME_MSG: str = ( + "localtime from current system time not implemented for mock" +) +MOCKED_TIME_NOT_LOCALTIME_EX = NotImplementedError(MOCKED_TIME_NOT_LOCALTIME_MSG) + +# For mock_pool.MockPool +MOCKED_POOL_NEW_MSG: str = "mock Pool.__new__ called" +MOCKED_POOL_INITIALIZED_MSG: str = "mock Pool instance initialized" +MOCKED_POOL_INIT_MSG: str = "mock Pool.__init__ called" +MOCKED_POOL_INIT_CLEARED_MSG: str = "mock Pool.__init__ queued attempts cleared" +MOCKED_POOL_GETADDR_MSG: str = "mock Pool.getaddrinfo %s:%s" +MOCKED_POOL_SOCKET_MSG: str = "mock Pool.socket called for %d:%d:%d" + +# For mock_pool.MockSocket +MOCKED_SOCK_NEW_MSG: str = "mock Socket.__new__%r called" +MOCKED_SOCK_SUPER_MSG: str = "mock Socket.__new__%r super called" +MOCKED_SOCK_INITIALIZED_MSG: str = "mock Socket instance initialized for %r" +MOCKED_SOCK_INIT_MSG: str = "mock Socket.__init__%r called" +MOCKED_SOCK_ENTER_MSG: str = "mock Socket.__enter__ called for %r" +MOCKED_SOCK_EXIT_MSG: str = "mock Socket.__exit__ called for %r" +MOCKED_SOCK_SETTIMEOUT_MSG: str = "mock Socket.settimeout(%d) called for %r" +MOCKED_SOCK_SENDTO_MSG: str = "mock Socket.sendto(packet,%r) called for %r" +MOCKED_SOCK_RECV_INTO_MSG: str = "mock Socket.recv_into(packet) called for %r" +MOCKED_SOCK_CLOSE_MSG: str = "mock Socket.close called for %r" + +MOCKED_NO_RESOURCE_MSG: str = "No response value available to mock %s" +MOCKED_CALLBACK_MSG: str = 'Mock notification callback "%s" for event %s and delay %d' + + +class ListHandler(logging.Handler): + """Save log records to a list""" + + def __init__(self, *args, **kwargs): + 
# super(ListHandler, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) + self.log_records: List[logging.LogRecord] = [] + + def emit(self, record: logging.LogRecord) -> None: + """capture the log record""" + self.log_records.append(record) + + def log_also_to_me(self, logger: logging.Logger) -> bool: + """add the list handler instance to a Logger""" + for existing_handler in logger._handlers: # pylint:disable=protected-access + if existing_handler == self: + return False # already there + logger.addHandler(self) + return True + + def log_only_to_me(self, logger: logging.Logger) -> None: + """replace all handlers of a Logger with just me""" + # pylint:disable=protected-access + while logger._handlers: + logger.removeHandler(logger._handlers[0]) + logger.addHandler(self) + + def to_tuple(self) -> Tuple[LogTupleT, ...]: # type:ignore + """ + log record data, without timestamp, as a tuple that can be directly compared + for unittest verification. + + :return tuples containing name, levelname, levelno, msg, args + :rtype Tuple[LotTupleT]: Tuple[Tuple[str, str, int, str, Tuple[Any]]] + """ + if not self.log_records: + return tuple() + return tuple(_make_tuple(rec) for rec in self.log_records) + + +class ResourcesDepletedError(IndexError): + """ + Exception for mocking code to use when provided resources are not sufficient for + the current request. + """ + + +def setup_logger(name: str) -> logging.Logger: + """ + Sets up and returns a logger with the specified name. + + This function checks the running environment and sets up the logging + appropriately for CircuitPython or standard Python. For standard Python, + it also configures a stream handler with a specific format. + + :param str name: The name of the logger. Typically MOCK_LOGGER. + :return: A configured logging.Logger object for the specified name. 
+ """ + logger = logging.getLogger(name) + # logger.setLevel(unittest_logging_level) + if not logger._handlers: # pylint:disable=protected-access + # Only add 'project' handler when none exist yet + handler = logging.StreamHandler() + logger.addHandler(handler) + + return logger + + +def set_utc_timezone() -> None: + """ + Set local time to UTC for CPython. CircuitPython uses UTC by default. + """ + try: + # Set the local timezone to UTC. Only needed when running unittests from CPython. + os.environ["TZ"] = "UTC" + time.tzset() # Set the local timezone to UTC + except AttributeError: + pass # setting environment variables is not available in CircuitPython + + +def make_log_rec( + name: str, msg: str, level: int = 20, args: Union[Tuple[Any, ...], Any] = () +) -> LogTupleT: # type:ignore + """ + Create expected log record for logger name, level and msg. + + This builds a tuple that can be directly compared to an entry returned by the + ListHandler.to_tuple() method. + + :param str name: name of the logger + :param str msg: message to log + :param int level: logging level, default 20 (info) + :param tuple args: arguments to pass to the log method, default () + :return: tuple, log record without timestamp + """ + args_tuple = args if isinstance(args, tuple) else (args,) + return ( + name, + logging._level_for(level), # pylint:disable=protected-access + level, + msg % args_tuple, + args_tuple, + ) # pylint:disable=protected-access + + +def make_mock_rec( + msg: str, level: int, args: Union[Tuple[Any, ...], Any] = () +) -> LogTupleT: # type:ignore + """ + Create expected log record for MOCK_LOGGER, level and msg. 
+ + :param str msg: message to log + :param int level: logging level + :param tuple args: arguments to pass to the log method, default () + :return: tuple, log record + """ + return make_log_rec(MOCK_LOGGER, msg, level, args) + + +def mock_info(msg: str, args: ArgsTupleT = ()) -> LogTupleT: # type:ignore + """Create expected mock log record for INFO level and msg.""" + return make_mock_rec(msg, logging.INFO, args) # pylint:disable=no-member + + +def _make_tuple(rec: logging.LogRecord) -> LogTupleT: # type:ignore + """ + log record data, without timestamp, as a tuple that can be directly compared + for unittest verification. + + :param logging.LogRecord rec: the log record to convert + :return: tuple, the converted log record data + """ + tmp_args = ( + rec.args if rec.args is None or isinstance(rec.args, tuple) else (rec.args,) + ) + return str(rec.name), str(rec.levelname), int(rec.levelno), str(rec.msg), tmp_args + + +def get_context_exception(context: RaisesContext) -> Exception: + """Get the exception from the context manager. + + Handle difference between unittest for CPython and CircuitPython. + """ + # pylint:disable=no-member + return ( + context.exception_value + if hasattr(context, "exception_value") + else context.exception + ) + + +def fmt_thousands(n: int) -> str: + """Format number with thousands separators.""" + return f"{n:,}" + + +def mock_cleanup() -> None: + """Clean up after mocking code.""" + testing_modules = [ + "tests.ntp_testing_support", + "tests.simulate_ntp_packet", + "tests.shared_for_testing", + "adafruit_ntp", + "tests.mocks.mock_pool", + "time", + "tests.mocks.mock_time", + "tests.mocks", + ] + if sys.implementation.name == "circuitpython": + # CPython unittest crashes if the module being run is deleted before it can be unloaded. 
+ testing_modules.extend( + [ + "tests.test_eventtype", + "tests.test_mock", + "tests.test_ntp_instantiation", + "tests.test_ntp_registration", + "tests.test_ntp", + "tests", + ] + ) + for module_name in testing_modules: + if module_name in sys.modules: + try: + del sys.modules[module_name] + except KeyError: + pass diff --git a/tests/simulate_ntp_packet.py b/tests/simulate_ntp_packet.py new file mode 100644 index 0000000..0a94954 --- /dev/null +++ b/tests/simulate_ntp_packet.py @@ -0,0 +1,261 @@ +# SPDX-FileCopyrightText: 2024 H PHil Duby +# +# SPDX-License-Identifier: MIT + +""" +Support functions to simulate and access NTP packets. +""" + +from collections import namedtuple +import struct +import time +from tests.shared_for_testing import NS_PER_SEC + +NTPPacket = namedtuple( + "NTPPacket", + [ + "leap", + "version", + "mode", + "stratum", + "poll", + "precision", + "root_delay", + "root_dispersion", + "ref_id", + "ref_ts", + "orig_ts", + "recv_ts", + "tx_ts", + ], +) + +# Constants +# !B «leap:2+version:3+mode:3» +# !B stratum +# !B poll +# !b precision +# !i root delay : signed fixed point, negative is anomaly +# !i root dispersion : signed fixed point, negative is anomaly +# !I reference id : ipv4 32 bit address +# !Q !Q !Q !Q NTP timestamps : reference, origin, receive, transmit +NTP_PACKET_STRUCTURE: str = "!BBBbiiiQQQQ" + +NTP_VERSION = 4 # Fixed for packet structure being used +NTP_MODE = 4 # Server: NTP server responding to client requests. 
+NTP_TO_UNIX_EPOCH_SEC = (
+    2208988800  # NTP epoch (1900-01-01) to Unix epoch (1970-01-01) seconds
+)
+NTP_TO_UNIX_EPOCH_NS = (
+    NTP_TO_UNIX_EPOCH_SEC * NS_PER_SEC
+)  # NTP to Unix epoch in nanoseconds
+
+
+def create_ntp_packet(  # pylint:disable=too-many-arguments,too-many-locals
+    iso: str = "2024-1-1T10:11:12.5",
+    *,
+    leap: int = 0,
+    mode: int = 4,
+    stratum: int = 2,
+    poll: int = 6,
+    precision: int = -6,
+    root_delay: int = 1_000_000_000,
+    root_dispersion: int = 1_000_000_000,
+    ipv4: str = "10.20.30.40",
+    ref_delta: int = 0,
+    receive_delta: int = 0,  # , origin_delta: int = 0
+    transmit_delay: int = 0,
+) -> bytearray:
+    """
+    Build a sample (simulated) NTP packet.
+
+    :param iso: str: variant of ISO time (y-m-dTh:m:s.f) where (f) can extend to nanoseconds and
+        leading zeros are optional.
+    :param mode: int «3 bits»: 3 is client, 4 is server
+    :param leap: int «2 bits»: Leap indicator (0: no warning, 1: 61 sec last min,
+        2: 59 sec last min, 3: alarm).
+    :param stratum: int «8 bits»: network distance from specialized time source reference.
+    :param poll: int «8 bits»: Poll interval (2^poll seconds).
+    :param precision: int: Precision (2^precision seconds).
+    :param root_delay: int: Root delay (nanoseconds).
+    :param root_dispersion: int: Root dispersion (nanoseconds).
+    :param ipv4: str: IPv4 dotted decimal address to use for NTP server reference id.
+    :param ref_delta: int: Reference timestamp delta from utc (nanoseconds).
+    :param origin_delta: int: Origin timestamp delta from utc (nanoseconds).
+        origin could be anything. Not related to the server timestamps. Usually 0.
+        Really needs its own iso utc string, plus nanoseconds offset, with flag value
+        to indicate 0, or an absolute NTP timestamp.
+    :param receive_delta: int: Receive timestamp delta from utc (nanoseconds).
+    :param transmit_delay: int: Transmit timestamp delta from receive_delta (nanoseconds).
+    :return: bytearray: NTP version 4, server mode packet.
+ """ + packet_size = struct.calcsize(NTP_PACKET_STRUCTURE) + packet = bytearray(packet_size) + + leap = min(3, max(0, int(leap))) # 2 bits + mode = min(7, max(0, int(mode))) # 3 bits + poll = min(14, max(4, int(poll))) # 2^poll seconds: since prev poll? + stratum = min(16, max(1, int(stratum))) + precision = min( + -6, max(-30, int(precision)) + ) # resolution of server clock: -20 = ms + reference_ntp_ns = NTP_TO_UNIX_EPOCH_NS + iso_to_nanoseconds(iso) + # Leap Indicator «2 bits» (LI), Version Number «3 bits» (VN), and Mode «3 bits» + li_vn_mode = (leap << 6) | (NTP_VERSION << 3) | mode + + def offset_timestamp(offset_ns: int) -> int: + """64 bit NTP timestamp offset from the reference point.""" + return ns_to_ntp_timestamp(reference_ntp_ns + offset_ns) + + ntp_fields = ( + li_vn_mode, + stratum, + poll, + precision, + ns_to_fixedpoint(root_delay), + ns_to_fixedpoint(root_dispersion), + ipv4_to_int(ipv4), + offset_timestamp(ref_delta), + 0, + offset_timestamp(receive_delta), + offset_timestamp(receive_delta + transmit_delay), + ) + struct.pack_into(NTP_PACKET_STRUCTURE, packet, 0, *ntp_fields) + return packet + + +def ipv4_to_int(ipv4: str) -> int: + """ + Convert a dotted IPv4 address into a 32-bit integer. + + :param ipv4 (str): The IPv4 address in dotted-decimal format (e.g., "192.168.1.1"). + :returns: int: The corresponding 32-bit integer representation of the IPv4 address. + :raises ValueError: If the input string is not a valid IPv4 address. 
+ """ + # Split dotted IPv4 address into its four integer octets + octets = tuple(map(int, ipv4.split("."))) + # Check that each octet is within the valid range (0-255), and that there are 4 values + if len(octets) != 4 or any(octet < 0 or octet > 255 for octet in octets): + raise ValueError("Must be 4 octets, each value must be between 0 and 255.") + # Combine octet values into a 32-bit integer + return (octets[0] << 24) | (octets[1] << 16) | (octets[2] << 8) | octets[3] + + +def iso_to_nanoseconds(iso: str) -> int: + """ + Convert an ISO format datetime string to nanoseconds since the Unix epoch. + + iso is not quite a standard iso format datetime string. + year "-" month "-" day "T" hour ":" minute ":" second, with optional "." nanoseconds + Normally the numeric fields (other than fraction) are fixed 2 or 4 digits, but this is not + enforced. The optional fractional field, when provided, is treated as have trailing zeros + to fill to 9 digits, which is then used as nanoseconds. + + No TZ offset considered: output is UTC time. + + :raises: ValueError if the input input string can not be parsed as an ISO datetime. + """ + # Naive conversion of y-m-dTh:m:s.fraction string to tuple of integer values. No validation. 
+ text = iso.replace("-", " ").replace(":", " ").replace("T", " ").replace(".", " ") + try: + raw_tuple = tuple(map(int, text.split())) + except ValueError as ex: + raise ValueError(f"Invalid ISO datetime string: {iso}") from ex + nine_tuple = raw_tuple[:6] + (0, -1, -1) + seconds = int(time.mktime(time.struct_time(nine_tuple))) + split_fractional = iso.split(".") + # Handle optional fractional seconds + fractional_seconds = split_fractional[-1] if len(split_fractional) > 1 else "" + fractional_digits = len(fractional_seconds) + nanoseconds = ( + 0 if not fractional_seconds else raw_tuple[-1] * (10 ** (9 - fractional_digits)) + ) + return seconds * NS_PER_SEC + nanoseconds + + +def _ns_to_unsigned_fixed_point(nanoseconds: int, fraction_bits: int) -> int: + """ + Convert nanoseconds to an unsigned fixed point seconds value. + + :param ns: int: nanoseconds + :param fraction_bits: int: number of bits for the fraction + :return: int: signed fixed point number for packing + """ + # Separate the integer seconds and fractional nanoseconds + seconds, frac_ns = divmod(abs(nanoseconds), NS_PER_SEC) + # Convert the fractional part to a binary fraction value + fixed_frac = (frac_ns << fraction_bits) // NS_PER_SEC + # Combine the integer part and fractional part into single fixed-point value + return (seconds << fraction_bits) | fixed_frac + + +def ns_to_fixedpoint(nanoseconds: int) -> int: + """ + Convert integer nanoseconds to a signed fixed point format: 16 bits integer, 16 bits fraction + + :param ns: int: nanoseconds + :return: int: signed fixed point number for packing + """ + unsigned_fixed_point = _ns_to_unsigned_fixed_point(nanoseconds, 16) + return unsigned_fixed_point if nanoseconds >= 0 else -unsigned_fixed_point + + +def ns_to_ntp_timestamp(ns_since_ntp_epoch: int) -> int: + """ + Convert nanoseconds since the NTP epoch into a 64-bit NTP timestamp format. 
+ + 64 bit NTP timestamp is 32 bit integer seconds and 32 bit fractional seconds + + :param ns_since_ntp_epoch: Nanoseconds since the NTP epoch (1900-01-01 00:00:00). + :returns: int: A 64-bit integer representing the full NTP timestamp. + """ + return _ns_to_unsigned_fixed_point(ns_since_ntp_epoch, 32) + + +def format_ns_as_iso_timestamp(unix_ns: int) -> str: + """ + Convert nanoseconds since the Unix epoch into an approximation of an ISO datetime string. + + The fractional seconds, when not zero, will be extended to 9 digits (nanoseconds) + + :param unix_ns: Nanoseconds since the Unix epoch (1970-01-01 00:00:00). + :return: str: The corresponding ISO format datetime string. + """ + # Convert nanoseconds to integer seconds and nanoseconds + seconds, fraction = divmod(unix_ns, NS_PER_SEC) + + # Determine the fractional suffix for the time string + tm_suffix = f".{fraction:09}" if fraction != 0 else "" + + tms = time.localtime(seconds) # Convert seconds to a struct_time + + # Format the full ISO timestamp + return ( + f"{tms.tm_year:04}-{tms.tm_mon:02}-{tms.tm_mday:02}T" + + f"{tms.tm_hour:02}:{tms.tm_min:02}:{tms.tm_sec:02}{tm_suffix}" + ) + + +def ntp_timestamp_to_unix_ns(ntp_timestamp: int) -> int: + """ + Convert a 64-bit NTP timestamp into nanoseconds since the Unix epoch. + + :param ntp_timestamp: NTP timestamp (32 bits for seconds and 32 bits for fractional seconds) + :return: int: The number of nanoseconds since the Unix epoch. + """ + # Split the 64-bit NTP timestamp into 32-bit seconds and 32-bit fractional seconds + ntp_seconds, ntp_fraction = divmod(ntp_timestamp, 1 << 32) # 0x100000000 + + # Convert NTP seconds to Unix seconds by subtracting the epoch difference + unix_seconds = ntp_seconds - NTP_TO_UNIX_EPOCH_SEC + + # Convert the fractional part to integer nanoseconds + # nanoseconds = (ntp_fraction * NS_PER_SEC) >> 32 + nanoseconds, remainder = divmod(ntp_fraction * NS_PER_SEC, 1 << 32) # 0x100000000 + # For use in testing, round up. 
That way round trip should be consistent. + if remainder > 0: + nanoseconds += 1 + + # Combine the seconds and fractional nanoseconds + unix_ns = (unix_seconds * NS_PER_SEC) + nanoseconds + return unix_ns diff --git a/tests/test_eventtype.py b/tests/test_eventtype.py new file mode 100644 index 0000000..d0d5bbc --- /dev/null +++ b/tests/test_eventtype.py @@ -0,0 +1,99 @@ +# SPDX-FileCopyrightText: 2024 H PHil Duby +# +# SPDX-License-Identifier: MIT + +""" +Unittests for the EventType class used by adafruit_ntp. This is an emulated enum, based on +a custom IntFlag class which allows bitwise operations on the event types. +""" + +import unittest +from tests.shared_for_testing import mock_cleanup +from adafruit_ntp import EventType, _IntFlag + + +class TestEventType(unittest.TestCase): + """Unittests for the EventType (emulated enum) class based on the custom IntFlag.""" + + def test_basic_event_types(self): + """Test basic event types and their values.""" + self.assertEqual(EventType.NO_EVENT.value, _IntFlag(0b000)) # 0 + self.assertEqual(EventType.SYNC_COMPLETE.value, _IntFlag(0b001)) # 1 + self.assertEqual(EventType.SYNC_FAILED.value, _IntFlag(0b010)) # 2 + self.assertEqual(EventType.LOOKUP_FAILED.value, _IntFlag(0b100)) # 4 + self.assertEqual(EventType.ALL_EVENTS.value, _IntFlag(0b111)) # 7 + + def test_event_combination(self): + """Test bitwise OR combination of event types.""" + combined_event = EventType.SYNC_COMPLETE | EventType.LOOKUP_FAILED + self.assertEqual(combined_event, _IntFlag(0b101)) # 1 | 4 = 5 + self.assertEqual(combined_event.value, 0b101) # 1 | 4 = 5 + + combined_event = EventType.SYNC_COMPLETE | EventType.SYNC_FAILED + self.assertEqual(combined_event, _IntFlag(0b011)) # 1 | 2 = 3 + self.assertEqual(combined_event.value, 0b011) # 1 | 2 = 3 + + combined_event = EventType.SYNC_FAILED | EventType.LOOKUP_FAILED + self.assertEqual(combined_event, _IntFlag(0b110)) # 2 | 4 = 6 + self.assertEqual(combined_event.value, 0b110) # 2 | 4 = 6 + + def 
test_event_intersection(self): + """Test bitwise AND intersection of event types.""" + combined_event = EventType.SYNC_COMPLETE | EventType.LOOKUP_FAILED + intersect_event = combined_event & EventType.SYNC_COMPLETE + self.assertEqual(intersect_event.value, 0b001) # 5 & 1 = 1 + + intersect_event = combined_event & EventType.SYNC_FAILED + self.assertEqual(intersect_event.value, 0b000) # 5 & 2 = 0 + + def test_event_complement(self): + """Test bitwise NOT complement of event types.""" + complement_event = ~EventType.SYNC_COMPLETE + # Expected to be the bitwise complement of SYNC_COMPLETE within the range of all flags + expected_value = 2 | EventType.SYNC_FAILED.value | EventType.LOOKUP_FAILED.value + self.assertEqual(complement_event.value, expected_value) + self.assertNotEqual(complement_event.value, EventType.SYNC_COMPLETE.value) + self.assertEqual(complement_event & EventType.SYNC_COMPLETE, 0b000) + self.assertEqual(complement_event & EventType.SYNC_COMPLETE, EventType.NO_EVENT) + self.assertEqual(~EventType.NO_EVENT, 0b111) + self.assertEqual(~EventType.NO_EVENT, EventType.ALL_EVENTS) + + def test_event_equality(self): + """Test equality between event types.""" + event1 = EventType.SYNC_COMPLETE + event2 = EventType.SYNC_COMPLETE + self.assertEqual(event1, event2) + + combined_event = EventType.SYNC_COMPLETE | EventType.LOOKUP_FAILED + self.assertNotEqual(event1, combined_event) + + self.assertEqual(EventType.SYNC_COMPLETE, EventType.SYNC_COMPLETE.value) + self.assertEqual(EventType.SYNC_COMPLETE, _IntFlag(0b001)) + self.assertEqual(EventType.SYNC_COMPLETE, 0b001) + self.assertNotEqual(EventType.SYNC_COMPLETE, EventType.SYNC_FAILED) + self.assertNotEqual(EventType.SYNC_COMPLETE, EventType.NO_EVENT) + self.assertNotEqual(EventType.SYNC_COMPLETE, EventType.LOOKUP_FAILED) + self.assertNotEqual(EventType.SYNC_COMPLETE, EventType.ALL_EVENTS) + self.assertNotEqual(EventType.SYNC_COMPLETE, _IntFlag(0b000)) + self.assertNotEqual(EventType.SYNC_COMPLETE, 0b000) + 
self.assertNotEqual(EventType.SYNC_COMPLETE, _IntFlag(0b010)) + self.assertNotEqual(EventType.SYNC_COMPLETE, 0b010) + + def test_event_xor(self): + """Test bitwise XOR operation of event types.""" + xor_event = EventType.SYNC_COMPLETE ^ EventType.SYNC_FAILED + self.assertEqual(xor_event.value, 0b011) # 1 ^ 2 = 3 + + xor_event = EventType.SYNC_COMPLETE ^ EventType.SYNC_COMPLETE + self.assertEqual(xor_event.value, 0b000) # 1 ^ 1 = 0 + + def test_event_to_bool(self): + """Test conversion of event types to bool.""" + self.assertTrue(bool(EventType.SYNC_COMPLETE)) + self.assertFalse(bool(EventType.NO_EVENT)) + + +if __name__ == "__main__": + unittest.main() + +mock_cleanup() diff --git a/tests/test_mock.py b/tests/test_mock.py new file mode 100644 index 0000000..e75145b --- /dev/null +++ b/tests/test_mock.py @@ -0,0 +1,1434 @@ +# SPDX-FileCopyrightText: 2024 H PHil Duby +# +# SPDX-License-Identifier: MIT + +""" +Unittests for the mock classes that support adafruit_ntp testing. +""" +# pylint: disable=too-many-lines + +import struct +import sys +import time + +try: + from typing import Tuple +except ImportError: + pass +import unittest + +try: + import socket as SocketPool +except ImportError: + from socketpool import SocketPool # type:ignore +import adafruit_logging as logging +from tests.mocks.mock_pool import MockPool, MockSocket +from tests.mocks import mock_time +from tests.mocks.mock_time import MockTime +from tests.mocks.mock_time import ( + monotonic_ns as mock_monotonic_ns, + sleep as mock_sleep, + localtime as mock_localtime, +) +from tests.shared_for_testing import ( + ListHandler, + ResourcesDepletedError, + RaisesContext, + get_context_exception, + make_mock_rec, + mock_info, + set_utc_timezone, + setup_logger, + fmt_thousands, + mock_cleanup, + MOCK_LOGGER, + NS_PER_SEC, + DEFAULT_NTP_ADDRESS, + MOCKED_NO_RESOURCE_MSG, + MOCKED_POOL_NEW_MSG, + MOCKED_POOL_INITIALIZED_MSG, + MOCKED_POOL_INIT_MSG, + MOCKED_POOL_INIT_CLEARED_MSG, + MOCKED_POOL_GETADDR_MSG, + 
MOCKED_POOL_SOCKET_MSG, + MOCKED_SOCK_NEW_MSG, + MOCKED_SOCK_SUPER_MSG, + MOCKED_SOCK_INITIALIZED_MSG, + MOCKED_SOCK_INIT_MSG, + MOCKED_SOCK_ENTER_MSG, + MOCKED_SOCK_EXIT_MSG, + MOCKED_SOCK_SETTIMEOUT_MSG, + MOCKED_SOCK_SENDTO_MSG, + MOCKED_SOCK_RECV_INTO_MSG, + MOCKED_TIME_DEFAULT_START_NS, + MOCKED_TIME_NEW_MSG, + MOCKED_TIME_MONOTONIC_NS_MSG, + MOCKED_TIME_SLEEP_MSG, + MOCKED_TIME_LOCALTIME_MSG, + MOCKED_TIME_NOT_LOCALTIME_EX, + NTP_PACKET_SIZE, + NTP_SERVER_IPV4_ADDRESS, + ADDR_SOCK_KEY, + IP_SOCKET, +) +from tests.simulate_ntp_packet import ( + NTPPacket, + create_ntp_packet, + format_ns_as_iso_timestamp, + ipv4_to_int, + iso_to_nanoseconds, + ns_to_fixedpoint, + ns_to_ntp_timestamp, + ntp_timestamp_to_unix_ns, + NTP_PACKET_STRUCTURE, + NTP_TO_UNIX_EPOCH_SEC, + NTP_TO_UNIX_EPOCH_NS, +) + + +class MockException(ValueError): + """Exception to pass to mock code, to verify raised when requested""" + + +MOCKED_EXCEPTION_MSG: str = "mock raised requested exception" +MOCKED_EXCEPTION = MockException(MOCKED_EXCEPTION_MSG) + + +class TestMockPoolSingleton(unittest.TestCase): + """Unittest for MockPool singleton.""" + + _log_handler: ListHandler = None + mogger: logging.Logger = None + + @classmethod + def setUpClass(cls): + """Set up class-level resources.""" + _setup_mock_log_capture(cls) + + @classmethod + def tearDownClass(cls): + """Get rid of the list test specific handler.""" + _teardown_mock_log_capture(cls) + MockPool.clear_mock_singleton() + + def setUp(self): + """Set up test environment""" + _reset_mock_log_capture(self) + MockPool.clear_mock_singleton() + + def tearDown(self): + """Clean up after each test""" + + def test_singleton(self): + """Test mock Pool class instantiation""" + # Cannot use MockPool.get_mock_instance() for the initial test, because that will raise + # an exception when the singleton is not initialized. 
+ # pylint:disable=protected-access + self.assertIsNone( + MockPool._instance, + f"MockPool._instance should start at None, found: {type(MockPool._instance)}", + ) + self.assertFalse( + hasattr(MockPool, "_mock_initialized"), + "MockPool class should not start " + f'with _mock_initialized attribute: {getattr(MockPool, "_mock_initialized", None)}', + ) + pool1 = MockPool() + self.assertIsInstance( + pool1, MockPool, f"instance should be MockPool: {type(pool1)}" + ) + self.assertIs( + pool1, + MockPool.get_mock_instance(), + 'instance should "be" same as _instance', + ) + self.assertFalse( + hasattr(MockPool, "_mock_initialized"), + "MockPool class should not " + f'get _mock_initialized attribute: {getattr(MockPool, "_mock_initialized", None)}', + ) + self.assertTrue( + hasattr(pool1, "_mock_initialized"), + "MockPool instance should have _mock_initialized attribute", + ) + # pylint:disable=no-member + self.assertEqual( + self._log_handler.to_tuple(), + ( + make_mock_rec(MOCKED_POOL_NEW_MSG, logging.DEBUG), + make_mock_rec(MOCKED_POOL_INITIALIZED_MSG, logging.DEBUG), + make_mock_rec(MOCKED_POOL_INIT_MSG, logging.DEBUG), + make_mock_rec(MOCKED_POOL_INIT_CLEARED_MSG, logging.DEBUG), + ), + f"first instance log records should match expected: {self._log_handler.to_tuple()}", + ) + self._log_handler.log_records.clear() + + pool2 = MockPool() + self.assertIsInstance( + pool2, MockPool, f"instance2 should be MockPool: {type(pool2)}" + ) + self.assertIs(pool1, pool2, "second instance should be same as first") + self.assertTrue( + pool2._mock_initialized, + "instance2._mock_initialized should be True: " + f"{type(pool2._mock_initialized)} {pool2._mock_initialized}", + ) + # pylint:disable=no-member + self.assertEqual( + self._log_handler.to_tuple(), + ( + make_mock_rec(MOCKED_POOL_NEW_MSG, logging.DEBUG), + make_mock_rec(MOCKED_POOL_INIT_MSG, logging.DEBUG), + ), + f"second instance log records should match expected: {self._log_handler.to_tuple()}", + ) + + +# end class 
TestMockPoolSingleton() + + +class TestMockPoolFunction(unittest.TestCase): + """Unittest for MockPool functionality.""" + + GOOD_SERVER: str = "google.ca" + GOOD_PORT: int = 80 + _log_handler: ListHandler = None + mogger: logging.Logger = None + + @classmethod + def setUpClass(cls): + """Set up class-level resources.""" + cls.mock_pool = MockPool() + _setup_mock_log_capture(cls) + + @classmethod + def tearDownClass(cls): + """Get rid of the list test specific handler.""" + _teardown_mock_log_capture(cls) + MockPool.clear_mock_singleton() + MockSocket.clear_mock_singleton() + + def setUp(self): + """Set up test environment""" + self.mock_pool.mock_getaddrinfo_attempts.clear() + MockSocket.clear_mock_singleton() + _reset_mock_log_capture(self) + + def tearDown(self): + """Clean up after each test""" + + def test_class_constants(self): + """See if the mocked class constants match the real class.""" + self.assertEqual( + MockPool.AF_INET, + SocketPool.AF_INET, + f"AF_INET {MockPool.AF_INET} should match {SocketPool.AF_INET}", + ) + self.assertEqual( + MockPool.SOCK_DGRAM, + SocketPool.SOCK_DGRAM, + f"SOCK_DGRAM {MockPool.SOCK_DGRAM} should match {SocketPool.SOCK_DGRAM}", + ) + self.assertEqual( + MockPool.SOCK_STREAM, + SocketPool.SOCK_STREAM, + f"SOCK_STREAM {MockPool.SOCK_STREAM} should match {SocketPool.SOCK_STREAM}", + ) + self.assertEqual( + MockPool.IPPROTO_IP, + SocketPool.IPPROTO_IP, + f"IPPROTO_IP {MockPool.IPPROTO_IP} should match {SocketPool.IPPROTO_IP}", + ) + + def test_empty_responses(self): + """Test empty mock getaddrinfo responses.""" + with self.assertRaises(ResourcesDepletedError) as context: + result = self.mock_pool.getaddrinfo(self.GOOD_SERVER, self.GOOD_PORT) + raise AssertionError( + f"should have raised ResourcesDepletedError, got {result}" + ) + exc_data = get_context_exception(context) + self.assertEqual( + repr(exc_data), + repr(ResourcesDepletedError(MOCKED_NO_RESOURCE_MSG % "getaddrinfo")), + ) + self.assertEqual( + 
self._log_handler.to_tuple(), + (mock_info(MOCKED_POOL_GETADDR_MSG, (self.GOOD_SERVER, self.GOOD_PORT)),), + ) + + def test_lookup_failures(self): + """Test address lookup failures before success.""" + self.mock_pool.set_getaddrinfo_responses( + [ + MOCKED_EXCEPTION, + MOCKED_EXCEPTION, + IP_SOCKET, + ] + ) + + with self.assertRaises(MockException) as context: + result = self.mock_pool.getaddrinfo(None, self.GOOD_PORT) + raise AssertionError(f"should have raised MockException, got {result}") + exc_data = get_context_exception(context) + self.assertEqual(repr(exc_data), repr(MOCKED_EXCEPTION)) + + with self.assertRaises(MockException) as context: + result = self.mock_pool.getaddrinfo(self.GOOD_SERVER, self.GOOD_PORT) + raise AssertionError(f"should have raised MockException, got {result}") + exc_data = get_context_exception(context) + self.assertEqual(repr(exc_data), repr(MOCKED_EXCEPTION)) + result = self.mock_pool.getaddrinfo(self.GOOD_SERVER, self.GOOD_PORT) + self.assertEqual(result, IP_SOCKET) + self.assertEqual( + self._log_handler.to_tuple(), + ( + mock_info(MOCKED_POOL_GETADDR_MSG, (None, self.GOOD_PORT)), + mock_info(MOCKED_POOL_GETADDR_MSG, (self.GOOD_SERVER, self.GOOD_PORT)), + mock_info(MOCKED_POOL_GETADDR_MSG, (self.GOOD_SERVER, self.GOOD_PORT)), + ), + ) + + def test_socket_default(self): + """Test default socket creation.""" + sock = self.mock_pool.socket() + self.assertIsInstance( + sock, MockSocket, f"sock instance should be MockSocket: {type(sock)}" + ) + expected = ( + self.mock_pool.AF_INET, + self.mock_pool.SOCK_STREAM, + self.mock_pool.IPPROTO_IP, + ) + self.assertEqual( + sock.mock_key, + expected, + f"default sock mock_key should match {expected}, got: {sock.mock_key}", + ) + self.assertEqual( + self._log_handler.to_tuple(), + ( + mock_info(MOCKED_POOL_SOCKET_MSG % expected), + mock_info(MOCKED_SOCK_NEW_MSG % (expected,)), + mock_info(MOCKED_SOCK_SUPER_MSG % (expected,)), + mock_info(MOCKED_SOCK_INITIALIZED_MSG % (expected,)), + 
mock_info(MOCKED_SOCK_INIT_MSG % (expected,)), + ), + "default socket creation log records should match expected, got:\n" + f"{self._log_handler.to_tuple()}", + ) + + def test_socket_dgram(self): + """Test datagram socket creation.""" + sock = self.mock_pool.socket(self.mock_pool.AF_INET, self.mock_pool.SOCK_DGRAM) + self.assertIsInstance( + sock, MockSocket, f"sock instance should be MockSocket: {type(sock)}" + ) + expected = ( + self.mock_pool.AF_INET, + self.mock_pool.SOCK_DGRAM, + self.mock_pool.IPPROTO_IP, + ) + self.assertEqual( + sock.mock_key, + expected, + f"dgram sock mock_key should match {expected}, got: {sock.mock_key}", + ) + self.assertEqual( + self._log_handler.to_tuple(), + ( + mock_info(MOCKED_POOL_SOCKET_MSG % expected), + mock_info(MOCKED_SOCK_NEW_MSG % (expected,)), + mock_info(MOCKED_SOCK_SUPER_MSG % (expected,)), + mock_info(MOCKED_SOCK_INITIALIZED_MSG % (expected,)), + mock_info(MOCKED_SOCK_INIT_MSG % (expected,)), + ), + "dgram socket creation log records should match expected, got:\n" + f"{self._log_handler.to_tuple()}", + ) + self._log_handler.log_records.clear() + + sock2 = self.mock_pool.socket(self.mock_pool.AF_INET, self.mock_pool.SOCK_DGRAM) + self.assertIsInstance( + sock2, MockSocket, f"sock2 instance should be MockSocket: {type(sock)}" + ) + expected = ( + self.mock_pool.AF_INET, + self.mock_pool.SOCK_DGRAM, + self.mock_pool.IPPROTO_IP, + ) + self.assertEqual( + sock2.mock_key, + expected, + f"dgram sock mock_key should match {expected}, got: {sock.mock_key}", + ) + self.assertEqual( + self._log_handler.to_tuple(), + ( + mock_info(MOCKED_POOL_SOCKET_MSG % expected), + mock_info(MOCKED_SOCK_NEW_MSG % (expected,)), + mock_info(MOCKED_SOCK_INIT_MSG % (expected,)), + ), + "duplicate dgram socket creation log records should match expected, got:\n" + f"{self._log_handler.to_tuple()}", + ) + + +# end class TestMockPool() + + +class TestMockSocketSingleton(unittest.TestCase): + """Unittest for MockSocket singleton""" + + _log_handler: 
ListHandler = None + mogger: logging.Logger = None + + @classmethod + def setUpClass(cls): + """Set up class-level resources.""" + _setup_mock_log_capture(cls) + + @classmethod + def tearDownClass(cls): + """Get rid of the list test specific handler.""" + _teardown_mock_log_capture(cls) + MockSocket.clear_mock_singleton() + + def setUp(self): + """Set up test environment""" + _reset_mock_log_capture(self) + MockSocket.clear_mock_singleton() + + def tearDown(self): + """Clean up after each test""" + + def test_singleton(self): + """Test mock Socket class instantiation""" + # pylint:disable=protected-access + sock_instances = MockSocket.get_mock_instance() + self.assertIsInstance( + sock_instances, dict, "MockSocket._instances should be a dict" + ) + self.assertEqual( + len(sock_instances), 0, "MockSocket._instances should start empty" + ) + mock_sock = MockSocket(*ADDR_SOCK_KEY) + self.assertIsNotNone(mock_sock, "Should have returned an instance") + self.assertIs( + sock_instances, + mock_sock.get_mock_instance(), + 'MockSocket._instances should "be" the instance._instances', + ) + self.assertEqual( + len(sock_instances), + 1, + "MockSocket._instances should contain a single entry", + ) + self.assertIn( + ADDR_SOCK_KEY, + sock_instances, + "MockSocket._instances should have expected key", + ) + self.assertEqual( + self._log_handler.to_tuple(), + ( + mock_info(MOCKED_SOCK_NEW_MSG % (ADDR_SOCK_KEY,)), + mock_info(MOCKED_SOCK_SUPER_MSG % (ADDR_SOCK_KEY,)), + mock_info(MOCKED_SOCK_INITIALIZED_MSG % (ADDR_SOCK_KEY,)), + mock_info(MOCKED_SOCK_INIT_MSG % (ADDR_SOCK_KEY,)), + ), + "first instance log records should match expected, got: " + + f"{self._log_handler.to_tuple()}", + ) + self._log_handler.log_records.clear() + + # duplicate instance + mock_sock1 = MockSocket(*ADDR_SOCK_KEY) + self.assertIs(mock_sock, mock_sock1, "should be the same MockSocket instance") + self.assertEqual( + len(sock_instances), + 1, + "MockSocket._instances should contain a single entry", + ) + 
self.assertEqual( + tuple(sock_instances.keys())[0], + ADDR_SOCK_KEY, + "MockSocket._instances should have expected entry", + ) + self.assertEqual( + self._log_handler.to_tuple(), + ( + mock_info(MOCKED_SOCK_NEW_MSG % (ADDR_SOCK_KEY,)), + mock_info(MOCKED_SOCK_INIT_MSG % (ADDR_SOCK_KEY,)), + ), + "duplicate instance log records should match expected, got: " + f"{self._log_handler.to_tuple()}", + ) + self._log_handler.log_records.clear() + + # different instance + alt_sock_key = (1, 2, 3) + mock_sock2 = MockSocket(*alt_sock_key) + self.assertIsNotNone(mock_sock) + self.assertIsNot( + mock_sock, mock_sock2, "should not be the same MockSocket instance" + ) + self.assertEqual( + len(sock_instances), 2, "MockSocket._instances should contain two entries" + ) + self.assertIn( + alt_sock_key, + sock_instances, + "MockSocket._instances should have expected alternate entry", + ) + self.assertEqual( + self._log_handler.to_tuple(), + ( + mock_info(MOCKED_SOCK_NEW_MSG % (alt_sock_key,)), + mock_info(MOCKED_SOCK_SUPER_MSG % (alt_sock_key,)), + mock_info(MOCKED_SOCK_INITIALIZED_MSG % (alt_sock_key,)), + mock_info(MOCKED_SOCK_INIT_MSG % (alt_sock_key,)), + ), + "alternate instance log records should match expected, got: " + f"{self._log_handler.to_tuple()}", + ) + + +# end class TestMockSocketSingleton() + + +class TestMockSocketFunction(unittest.TestCase): + """Unittest for MockSocket functionality""" + + _log_handler: ListHandler = None + mogger: logging.Logger = None + + @classmethod + def setUpClass(cls): + """Set up class-level resources.""" + _setup_mock_log_capture(cls) + + @classmethod + def tearDownClass(cls): + """Get rid of the list test specific handler.""" + _teardown_mock_log_capture(cls) + + def setUp(self): + """Set up test environment""" + MockSocket.clear_mock_singleton() + # The method being tested will be used by the instance of the app being tested, which + # is created after the 'controlling' instance in the unit tests. 
Create the controlling + # instance, clear the log records, and then create the app instance for the test. + self.mock_control = MockSocket(*ADDR_SOCK_KEY) + self.mock_socket = MockSocket(*ADDR_SOCK_KEY) + _reset_mock_log_capture(self) + + def tearDown(self): + """Clean up after each test""" + + def test_settimeout_direct(self): + """test direct call to MockSocket.settimeout""" + timeout = 15 + self.mock_socket.settimeout(timeout) + self.assertEqual( + self._log_handler.to_tuple(), + (mock_info(MOCKED_SOCK_SETTIMEOUT_MSG, (timeout, ADDR_SOCK_KEY)),), + ) + + def test_settimeout_with(self): + """test MockSocket.settimeout inside with""" + with MockSocket(*ADDR_SOCK_KEY) as sock: + timeout = 15 + sock.settimeout(timeout) + self.assertEqual( + self._log_handler.to_tuple(), + ( + mock_info(MOCKED_SOCK_NEW_MSG % (ADDR_SOCK_KEY,)), + mock_info(MOCKED_SOCK_INIT_MSG % (ADDR_SOCK_KEY,)), + mock_info(MOCKED_SOCK_ENTER_MSG % (ADDR_SOCK_KEY,)), + mock_info(MOCKED_SOCK_SETTIMEOUT_MSG, (timeout, ADDR_SOCK_KEY)), + mock_info(MOCKED_SOCK_EXIT_MSG % (ADDR_SOCK_KEY,)), + ), + ) + + def test_sendto_direct(self): + """test direct call to MockSocket.sendto""" + # The method being tested will be used by the instance of the app being tested, which + # is created after the 'controlling' instance in the unit tests. Create the controlling + # instance, clear the log records, and then create the app instance for the test. 
+ size, send_packet, ref_send_packet = self._setup_test_sendto() + self.assertEqual( + len(ref_send_packet), size, f"NTP Packet should be {size} bytes" + ) + self.assertIsNot( + send_packet, ref_send_packet, "packet to send should be a copy" + ) + self.assertEqual( + send_packet, ref_send_packet, "packet to send should match ref" + ) + + result = self.mock_socket.sendto(send_packet, DEFAULT_NTP_ADDRESS) + self.assertEqual(result, size, f"sendto should return {size}, got {result}") + self._match_send_packet(self.mock_control, send_packet, ref_send_packet) + self.assertEqual( + self._log_handler.to_tuple(), + (mock_info(MOCKED_SOCK_SENDTO_MSG, (DEFAULT_NTP_ADDRESS, ADDR_SOCK_KEY)),), + ) + + def test_sendto_with(self): + """test MockSocket.sendto using with""" + size, send_packet, ref_send_packet = self._setup_test_sendto() + with MockSocket(*ADDR_SOCK_KEY) as sock: + result = sock.sendto(send_packet, DEFAULT_NTP_ADDRESS) + self.assertEqual(result, size, f"sendto should return {size}, got {result}") + self._match_send_packet(self.mock_control, send_packet, ref_send_packet) + self.assertEqual( + self._log_handler.to_tuple(), + ( + mock_info(MOCKED_SOCK_NEW_MSG % (ADDR_SOCK_KEY,)), + mock_info(MOCKED_SOCK_INIT_MSG % (ADDR_SOCK_KEY,)), + mock_info(MOCKED_SOCK_ENTER_MSG % (ADDR_SOCK_KEY,)), + mock_info(MOCKED_SOCK_SENDTO_MSG, (DEFAULT_NTP_ADDRESS, ADDR_SOCK_KEY)), + mock_info(MOCKED_SOCK_EXIT_MSG % (ADDR_SOCK_KEY,)), + ), + ) + + def _setup_test_sendto(self) -> Tuple[int, bytearray, bytearray]: + """setup data to use for testing sendto method.""" + base_packet = create_ntp_packet(mode=3) + ref_send_packet = bytearray((0,) * len(base_packet)) + ref_send_packet[0] = base_packet[0] # Leap:2, Version:3, Mode:3 + self.mock_control.mock_sendto_attempts = [NTP_PACKET_SIZE] + send_packet = ref_send_packet[:] + return NTP_PACKET_SIZE, send_packet, ref_send_packet + + def test_empty_recv_into_responses(self): + """Test empty recv_into responses.""" + source_packet = 
create_ntp_packet(mode=3) + test_sock = MockSocket(*ADDR_SOCK_KEY) + self._log_handler.log_records.clear() + with self.assertRaises(ResourcesDepletedError) as context: + result = test_sock.recv_into(source_packet) + raise AssertionError( + f"should have raised ResourcesDepletedError, got {result}" + ) + exc_data = get_context_exception(context) + self.assertEqual( + repr(exc_data), + repr(ResourcesDepletedError(MOCKED_NO_RESOURCE_MSG % "recv_into")), + ) + self.assertEqual( + self._log_handler.to_tuple(), + (mock_info(MOCKED_SOCK_RECV_INTO_MSG % (ADDR_SOCK_KEY,)),), + ) + self._log_handler.log_records.clear() + + def test_empty_sendto_responses(self): + """Test empty sendto responses.""" + source_packet = create_ntp_packet(mode=3) + test_sock = MockSocket(*ADDR_SOCK_KEY) + self._log_handler.log_records.clear() + with self.assertRaises(ResourcesDepletedError) as context: + result = test_sock.sendto(source_packet, NTP_SERVER_IPV4_ADDRESS) + raise AssertionError( + f"should have raised ResourcesDepletedError, got {result}" + ) + exc_data = get_context_exception(context) + self.assertEqual( + repr(exc_data), + repr(ResourcesDepletedError(MOCKED_NO_RESOURCE_MSG % "sendto")), + ) + self.assertEqual( + self._log_handler.to_tuple(), + ( + mock_info( + MOCKED_SOCK_SENDTO_MSG, (NTP_SERVER_IPV4_ADDRESS, ADDR_SOCK_KEY) + ), + ), + ) + self._log_handler.log_records.clear() + + def test_recv_into_direct(self): + """test MockSocket.recv_into with good data""" + recv_packet = create_ntp_packet(mode=4, ref_delta=500_000_000) + self.mock_control.mock_recv_into_attempts = [recv_packet] + source_packet = bytearray((0,) * len(recv_packet)) + test_sock = MockSocket(*ADDR_SOCK_KEY) + result = test_sock.recv_into(source_packet) + self.assertEqual( + result, + len(recv_packet), + f"recv_into should return {len(recv_packet)}, got {result}", + ) + self.assertEqual( + source_packet, recv_packet, "source packet should match received" + ) + self.assertEqual( + self._log_handler.to_tuple(), + ( 
+ mock_info(MOCKED_SOCK_NEW_MSG % (ADDR_SOCK_KEY,)), + mock_info(MOCKED_SOCK_INIT_MSG % (ADDR_SOCK_KEY,)), + mock_info(MOCKED_SOCK_RECV_INTO_MSG % (ADDR_SOCK_KEY,)), + ), + ) + + def test_recv_into_raise_exception(self): + """test MockSocket.recv_into raising injected exception""" + self.mock_control.mock_recv_into_attempts = [MOCKED_EXCEPTION] + source_packet = bytearray((0,) * NTP_PACKET_SIZE) + with self.assertRaises(MockException) as context: + result = self.mock_socket.recv_into(source_packet) + raise AssertionError(f"should have raised MockException, got {result}") + exc_data = get_context_exception(context) + self.assertEqual(repr(exc_data), repr(MOCKED_EXCEPTION)) + self.assertEqual( + self._log_handler.to_tuple(), + (mock_info(MOCKED_SOCK_RECV_INTO_MSG % (ADDR_SOCK_KEY,)),), + ) + + def test_sendto_raise_exception(self): + """test MockSocket.sendto raising injected exception""" + self.mock_control.mock_sendto_attempts = [MOCKED_EXCEPTION] + source_packet = bytearray((0,) * NTP_PACKET_SIZE) + with self.assertRaises(MockException) as context: + result = self.mock_socket.sendto(source_packet, NTP_SERVER_IPV4_ADDRESS) + raise AssertionError(f"should have raised MockException, got {result}") + exc_data = get_context_exception(context) + self.assertEqual(repr(exc_data), repr(MOCKED_EXCEPTION)) + self.assertEqual( + self._log_handler.to_tuple(), + ( + mock_info( + MOCKED_SOCK_SENDTO_MSG, (NTP_SERVER_IPV4_ADDRESS, ADDR_SOCK_KEY) + ), + ), + ) + + def _match_send_packet( + self, control: MockSocket, send_packet: bytearray, ref_send_packet: bytearray + ) -> None: + """ + Match the send packet to the reference packet + + :param control: MockSocket: mock control instance for socket + :param send_packet: bytearray: packet sent + :param ref_send_packet: bytearray: reference packet + """ + self.assertIsNot( + send_packet, ref_send_packet, "packet to send should be a copy" + ) + self.assertEqual( + len(control.mock_send_packet), + 1, + "Should be 1 packet in the send " + 
f"capture buffer, found {len(control.mock_send_packet)}", + ) + captured_packet = control.mock_send_packet.pop(0) + self.assertEqual(captured_packet, ref_send_packet) + self.assertIsNot( + captured_packet, send_packet, "Captured packet should be a copy" + ) + raw_fields = struct.unpack(NTP_PACKET_STRUCTURE, captured_packet) + lvm = raw_fields[0] # Leap:2, Version:3, Mode:3 + captured_fields = NTPPacket( + (lvm & 0b11000000) >> 6, + (lvm & 0b00111000) >> 3, + lvm & 0b00000111, + *raw_fields[1:], + ) + self.assertEqual( + captured_fields, NTPPacket(0, 4, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + ) + + +# end class TestMockSocketFunction() + + +class TestPacketUtilities(unittest.TestCase): + """Unittest for ntp packet and field manipulation utility functions""" + + @classmethod + def setUpClass(cls): + """Set up class-level resources.""" + set_utc_timezone() + + @classmethod + def tearDownClass(cls): + """Get rid of resources used for the TestCase.""" + + def test_ipv4_to_int(self): + """test ipv4_to_int""" + ipv4 = "192.168.1.1" + expected = ( + 0xC0A80101 # hex(192) hex(168) hex(1) hex(1), 8 bits each = 0xc0a80101 + ) + result = ipv4_to_int(ipv4) + self.assertEqual( + result, expected, f"Expected {expected} for {ipv4}, got {result}" + ) + ipv4 = "255.255.255.255" + expected = 0xFFFFFFFF + result = ipv4_to_int(ipv4) + self.assertEqual( + result, expected, f"Expected {expected} for {ipv4}, got {result}" + ) + ipv4 = "0.0.0.0" + expected = 0 + result = ipv4_to_int(ipv4) + self.assertEqual( + result, expected, f"Expected {expected} for {ipv4}, got {result}" + ) + + def test_iso_to_nanoseconds(self): + """test iso_to_seconds""" + iso = "2024-1-1T10:11:12.123456789" + result = iso_to_nanoseconds(iso) + expected = 1704103872123456789 + self.assertEqual( + result, expected, f"Expected {expected} for {iso}, got {result}" + ) + iso = "2024-1-1T10:11:12.12345678" + result = iso_to_nanoseconds(iso) + expected = 1704103872123456780 + self.assertEqual( + result, expected, f"Expected 
{expected} for {iso}, got {result}" + ) + iso = "2024-1-1T10:11:12.5" + result = iso_to_nanoseconds(iso) + expected = 1704103872500000000 + self.assertEqual( + result, expected, f"Expected {expected} for {iso}, got {result}" + ) + iso = "2021-08-11T16:12:34" + result = iso_to_nanoseconds(iso) + expected = 1628698354000000000 + self.assertEqual( + result, expected, f"Expected {expected} for {iso}, got {result}" + ) + iso = "2023-12-31T23:59:59.987654321" + result = iso_to_nanoseconds(iso) + expected = 1704067199987654321 + self.assertEqual( + result, expected, f"Expected {expected} for {iso}, got {result}" + ) + + def test_ns_to_fixedpoint(self): + """test ns_to_fixedpoint""" + nanoseconds = 1 + result = ns_to_fixedpoint(nanoseconds) + expected = 0 + nanoseconds = 900 + result = ns_to_fixedpoint(nanoseconds) + expected = 0 + self.assertEqual( + result, expected, f"Expected {expected} for {nanoseconds}, got {result}" + ) + nanoseconds = 500_000_000 + result = ns_to_fixedpoint(nanoseconds) + expected = 0x8000 + self.assertEqual( + result, expected, f"Expected {expected} for {nanoseconds}, got {result}" + ) + nanoseconds = 2_500_000_000 + result = ns_to_fixedpoint(nanoseconds) + expected = 0x28000 + self.assertEqual( + result, expected, f"Expected {expected} for {nanoseconds}, got {result}" + ) + nanoseconds = -500_000_000 + result = ns_to_fixedpoint(nanoseconds) + expected = -1 * (0x8000) + self.assertEqual( + result, expected, f"Expected {expected} for {nanoseconds}, got {result}" + ) + nanoseconds = -2_500_000_000 + result = ns_to_fixedpoint(nanoseconds) + expected = -1 * (0x28000) + self.assertEqual( + result, expected, f"Expected {expected} for {nanoseconds}, got {result}" + ) + + def test_ns_to_ntp_timestamp(self): + """test ns_to_ntp_timestamp""" + # iso = '2024-1-1T10:11:12.5' + # utc ns = 1_704_103_872_500_000_000 + # seconds = 1_704_103_872, 0x65928fc0 + tst_seconds = 1_704_103_872 + NTP_TO_UNIX_EPOCH_SEC # 0xe93d0e40 + tst_ns = 500_000_000 + ntp_ns = 
(tst_seconds * NS_PER_SEC) + tst_ns + expected = 0xE93D0E4080000000 + result = ns_to_ntp_timestamp(ntp_ns) + self.assertEqual( + result, expected, f"Expected {expected} for {ntp_ns}, got {result}" + ) + + tst_ns = 3906250 + ntp_ns = (tst_seconds * NS_PER_SEC) + tst_ns + expected = 0xE93D0E4001000000 + result = ns_to_ntp_timestamp(ntp_ns) + self.assertEqual( + result, expected, f"Expected {expected} for {ntp_ns}, got {result}" + ) + + tst_ns = 1 + ntp_ns = (tst_seconds * NS_PER_SEC) + tst_ns + expected = 0xE93D0E4000000004 + result = ns_to_ntp_timestamp(ntp_ns) + self.assertEqual( + result, expected, f"Expected {expected} for {ntp_ns}, got {result}" + ) + + def test_ntp_timestamp_to_unix_ns(self): + """test ntp_timestamp_to_unix_ns""" + # iso = '2024-1-1T10:11:12.5' + # utc ns = 1_704_103_872_500_000_000 + ntp = 0xE93D0E4080000000 + expected = 1_704_103_872_500_000_000 + result = ntp_timestamp_to_unix_ns(ntp) + self.assertEqual( + result, expected, f"Expected0 {expected} for {ntp:x}, got {result}" + ) + ntp = 0xE93D0E4000000001 + expected = 1_704_103_872_000_000_001 + result = ntp_timestamp_to_unix_ns(ntp) + self.assertEqual( + result, expected, f"Expected1 {expected} for {ntp:x}, got {result}" + ) + ntp = 0xE93D0E4000000002 + expected = 1_704_103_872_000_000_001 + result = ntp_timestamp_to_unix_ns(ntp) + self.assertEqual( + result, expected, f"Expected1 {expected} for {ntp:x}, got {result}" + ) + ntp = 0xE93D0E4000000003 + expected = 1_704_103_872_000_000_001 + result = ntp_timestamp_to_unix_ns(ntp) + self.assertEqual( + result, expected, f"Expected1 {expected} for {ntp:x}, got {result}" + ) + ntp = 0xE93D0E4000000004 + expected = 1_704_103_872_000_000_001 + result = ntp_timestamp_to_unix_ns(ntp) + self.assertEqual( + result, expected, f"Expected2 {expected} for {ntp:x}, got {result}" + ) + ntp = 0xE93D0E4000000005 + expected = 1_704_103_872_000_000_002 + result = ntp_timestamp_to_unix_ns(ntp) + self.assertEqual( + result, expected, f"Expected3 {expected} for 
{ntp:x}, got {result}" + ) + + def test_ns_ntp_ns_round_trip(self): + """test ns_to_ntp_timestamp and back""" + base_utc_ns = 1_704_103_872 * NS_PER_SEC + # Test across different nanosecond ranges + for slide in range(32): + slide_offset = 2**slide + for ns_offset in range(12): + utc_ns = base_utc_ns + slide_offset + ns_offset + result = ntp_timestamp_to_unix_ns( + ns_to_ntp_timestamp(utc_ns + NTP_TO_UNIX_EPOCH_NS) + ) + self.assertEqual( + result, + utc_ns, + f"Expected {fmt_thousands(utc_ns)} for round trip, got {fmt_thousands(result)}", + ) + + def test_create_ntp_packet(self): # pylint:disable=too-many-locals + """test create_ntp_packet""" + test_iso = "2024-01-01T10:11:12.987654321" + test_leap = 0 + fixed_version = 4 + test_mode = 5 + test_stratum = 3 + test_poll = 9 + test_precision = -9 + test_root_delay = 1_543_210_987 + test_root_dispersion = 567_432 + test_ipv4 = "10.23.45.67" + test_ref_delta = 0 # iso is the reference + test_receive_delta = 10_012_345_678 # 10 seconds plus adjust to get all 9's ns + test_transmit_delay = 500_000_002 # just over half a second + result = create_ntp_packet( + test_iso, + leap=test_leap, + mode=test_mode, + stratum=test_stratum, + poll=test_poll, + precision=test_precision, + root_delay=test_root_delay, + root_dispersion=test_root_dispersion, + ipv4=test_ipv4, + ref_delta=test_ref_delta, + receive_delta=test_receive_delta, + transmit_delay=test_transmit_delay, + ) + raw_fields = struct.unpack(NTP_PACKET_STRUCTURE, result) + lvm = raw_fields[0] # Leap:2, Version:3, Mode:3 + created_fields = NTPPacket( + (lvm & 0b11000000) >> 6, + (lvm & 0b00111000) >> 3, + lvm & 0b00000111, + *raw_fields[1:], + ) + self.assertEqual( + created_fields.leap, + test_leap, + f"leap is {created_fields.leap}, expected {test_leap}", + ) + self.assertEqual( + created_fields.version, + fixed_version, + f"version is {created_fields.version}, expected {fixed_version}", + ) + self.assertEqual( + created_fields.mode, + test_mode, + f"mode is 
{created_fields.mode}, expected {test_mode}", + ) + self.assertEqual( + created_fields.stratum, + test_stratum, + f"stratum is {created_fields.stratum}, expected {test_stratum}", + ) + self.assertEqual( + created_fields.poll, + test_poll, + f"poll is {created_fields.poll}, expected {test_poll}", + ) + self.assertEqual( + created_fields.precision, + test_precision, + f"precision is {created_fields.precision}, expected {test_precision}", + ) + formatted = ns_to_fixedpoint(test_root_delay) + self.assertEqual( + created_fields.root_delay, + formatted, + f"root_delay is {fmt_thousands(created_fields.root_delay)} " + + f"{created_fields.root_delay:x}, " + + f"expected {fmt_thousands(test_root_delay)} {formatted:x}", + ) + formatted = ns_to_fixedpoint(test_root_dispersion) + self.assertEqual( + created_fields.root_dispersion, + formatted, + "root_dispersion is " + f"{fmt_thousands(created_fields.root_dispersion)} " + + f"{created_fields.root_dispersion:x}, " + + f"expected {fmt_thousands(test_root_dispersion)} {formatted:x}", + ) + formatted = "{}.{}.{}.{}".format( # pylint:disable=consider-using-f-string + *tuple( + (created_fields.ref_id >> shift) & 0xFF for shift in range(24, -1, -8) + ) + ) + self.assertEqual( + formatted, test_ipv4, f"ref_id (IPv4) is {formatted}, expected {test_ipv4}" + ) + formatted = format_ns_as_iso_timestamp( + ntp_timestamp_to_unix_ns(created_fields.ref_ts) + ) + expected = "2024-01-01T10:11:12.987654321" + self.assertEqual( + formatted, expected, f"ref_ts is {formatted}, expected {expected}" + ) + formatted = format_ns_as_iso_timestamp( + ntp_timestamp_to_unix_ns(created_fields.recv_ts) + ) + expected = "2024-01-01T10:11:22.999999999" + self.assertEqual( + formatted, expected, f"recv_ts is {formatted}, expected {expected}" + ) + formatted = format_ns_as_iso_timestamp( + ntp_timestamp_to_unix_ns(created_fields.tx_ts) + ) + expected = "2024-01-01T10:11:23.500000001" + self.assertEqual( + formatted, expected, f"tx_ts is {formatted}, expected 
{expected}" + ) + self.assertEqual( + created_fields.orig_ts, + 0, + f"orig_ts is {created_fields.orig_ts}, expected {0}", + ) + + +# end class TestPacketUtilities() + + +class TestMockTimeSingleton(unittest.TestCase): + """Unittest for MockTime singleton""" + + _log_handler: ListHandler = None + mogger: logging.Logger = None + + @classmethod + def setUpClass(cls): + """Set up class-level resources.""" + _setup_mock_log_capture(cls) + + @classmethod + def tearDownClass(cls): + """Get rid of the list test specific handler.""" + _teardown_mock_log_capture(cls) + mock_time.clear_mock_singleton() + + def setUp(self): + """Set up test environment""" + mock_time.clear_mock_singleton() + _reset_mock_log_capture(self) + + def test_singleton(self): + """Test mock class instantiation""" + base_instance = mock_time.get_mock_instance() + self.assertIs( + base_instance, + MockTime.get_mock_instance(), + "MockTime._instance should match the module level instance", + ) + time_instance = MockTime() + self.assertIsNotNone( + time_instance, + f"MockTime Should have returned an instance: {type(time_instance)}", + ) + self.assertIs( + base_instance, + time_instance, + "MockTime instantiation should match the module level instance", + ) + self.assertEqual( + self._log_handler.to_tuple(), + (mock_info(MOCKED_TIME_NEW_MSG),), + "First instance log records should match expected; " + f"got:\n{self._log_handler.to_tuple()}", + ) + # Really time_instance is not the first instance. base_instance was created first + # when the module was imported, and mock_time.clear_mock_singleton() makes sure + # that stays current. 
That is why the above log check does NOT include a record for + # MOCKED_TIME_FIRST_NEW_MSG + self._log_handler.log_records.clear() + + # duplicate instance + time_instance1 = MockTime() + self.assertIs( + time_instance, time_instance1, "should be the same MockTime instance" + ) + self.assertEqual( + self._log_handler.to_tuple(), + (mock_info(MOCKED_TIME_NEW_MSG),), + "Other instance log records should match expected; " + f"got:\n{self._log_handler.to_tuple()}", + ) + + +# end class TestMockSocketSingleton() + + +class TestMockTimeFunction(unittest.TestCase): + """Unittest for MockTime functionality""" + + _log_handler: ListHandler = None + mogger: logging.Logger = None + + @classmethod + def setUpClass(cls): + """Set up class-level resources.""" + _setup_mock_log_capture(cls) + set_utc_timezone() + # cls.mock_control = mock_time.get_mock_instance() + + @classmethod + def tearDownClass(cls): + """Get rid of the list test specific handler.""" + _teardown_mock_log_capture(cls) + mock_time.clear_mock_singleton() + + def setUp(self): + """Set up test environment""" + mock_time.clear_mock_singleton() + self.mock_control = mock_time.get_mock_instance() + # self.mock_control.set_mock_ns(MOCKED_TIME_DEFAULT_START_NS) + _reset_mock_log_capture(self) + + def tearDown(self): + """Clean up after each test""" + + def test_get_mock_ns(self): + """test MockTime.get_mock_ns""" + expected = MOCKED_TIME_DEFAULT_START_NS + actual = self.mock_control.get_mock_ns() + self.assertEqual( + actual, expected, f"Initial mock time should be {expected}, got {actual}" + ) + expected = 1_234_567 + self.mock_control.set_mock_ns(expected) + actual = self.mock_control.get_mock_ns() + self.assertEqual( + actual, expected, f"Updated mock time should be {expected}, got {actual}" + ) + self.assertEqual( + self._log_handler.to_tuple(), + (), + f"Using get_mock_ns should not create any log records: {self._log_handler.to_tuple()}", + ) + + def test_set_mock_ns(self): + """test MockTime.set_mock_ns""" + 
expected = 7_654_321 + self.mock_control.set_mock_ns(expected) + actual = self.mock_control.get_mock_ns() + self.assertEqual( + actual, + expected, + f"mock instance monotonic_ns should be {expected}, got {actual}", + ) + actual = MockTime.get_mock_instance().get_mock_ns() + self.assertEqual( + actual, + expected, + f"mock Class monotonic_ns should be be {expected}, got {actual}", + ) + self.assertEqual( + self._log_handler.to_tuple(), + (), + f"Using set_mock_ns should not create any log records: {self._log_handler.to_tuple()}", + ) + + def test_new(self): + """test MockTime.__new__""" + expected = MOCKED_TIME_DEFAULT_START_NS + tm_instance = MockTime() + actual = tm_instance.get_mock_ns() + self.assertEqual( + actual, + expected, + f"default mock time should start at {expected}, got {actual}", + ) + self.assertEqual( + self._log_handler.to_tuple(), + (mock_info(MOCKED_TIME_NEW_MSG),), + "Instance1 reset log records should match expected; " + f"got:\n{self._log_handler.to_tuple()}", + ) + self._log_handler.log_records.clear() + + expected = 1_345_987_246 + tm_instance = MockTime() + tm_instance.set_mock_ns(expected) + actual = tm_instance.get_mock_ns() + self.assertEqual( + actual, + expected, + f"specified mock time should start at {expected}, got {actual}", + ) + self.assertEqual( + self._log_handler.to_tuple(), + (mock_info(MOCKED_TIME_NEW_MSG),), + "Instance2 reset log records should match expected; " + f"got:\n{self._log_handler.to_tuple()}", + ) + + def test_monotonic_ns(self): + """test MockTime.monotonic_ns, mock_time.monotonic_ns, and mock_monotonic_ns""" + expected = MOCKED_TIME_DEFAULT_START_NS + actual = self.mock_control.get_mock_ns() + self.assertEqual( + actual, + expected, + f"starting mock time should start at {expected}, got {actual}", + ) + expected += 1 + actual = self.mock_control.monotonic_ns() + self._verify_monotonic_ns_increment(expected, actual, "instance monotonic_ns") + self._log_handler.log_records.clear() + expected += 1 + actual = 
mock_time.monotonic_ns() + self._verify_monotonic_ns_increment(expected, actual, "mock_time.monotonic_ns") + self._log_handler.log_records.clear() + expected += 1 + actual = mock_monotonic_ns() + self._verify_monotonic_ns_increment(expected, actual, "mock_monotonic_ns") + + def _verify_monotonic_ns_increment( + self, expected: int, actual: int, label: str + ) -> None: + """Consistent check of monotonic_ns result.""" + self.assertEqual( + actual, + expected, + f"{label} should have incremented to {expected}, got {actual}", + ) + self.assertEqual( + self._log_handler.to_tuple(), + (mock_info(MOCKED_TIME_MONOTONIC_NS_MSG, (fmt_thousands(expected),)),), + f"{label} log records should match expected; got:\n{self._log_handler.to_tuple()}", + ) + + def test_sleep(self): + """test MockTime.sleep, mock_time.sleep and mock_sleep""" + sleep_time = 1.5 # float seconds + # Do not use simple 1_500_000_000 offset. CircuitPython precision does not get there. + sleep_ns = int(sleep_time * 1_000_000_000) + expected = MOCKED_TIME_DEFAULT_START_NS + sleep_ns + self.mock_control.sleep(sleep_time) + self._verify_sleep_time(sleep_time, expected, "instance sleep") + self._log_handler.log_records.clear() + expected += sleep_ns + mock_time.sleep(sleep_time) + self._verify_sleep_time(sleep_time, expected, "mock_time.sleep") + self._log_handler.log_records.clear() + expected += sleep_ns + mock_sleep(sleep_time) + self._verify_sleep_time(sleep_time, expected, "mock_sleep") + + def _verify_sleep_time(self, sleep_time: float, expected: int, label: str) -> None: + """Consistent check of sleep result.""" + actual = self.mock_control.get_mock_ns() + self.assertEqual( + actual, + expected, + f"{label} mock time should end at {expected}, got {actual}", + ) + self.assertEqual( + self._log_handler.to_tuple(), + (mock_info(MOCKED_TIME_SLEEP_MSG, (sleep_time,)),), + f"{label} log records should match expected; got:\n{self._log_handler.to_tuple()}", + ) + + def test_localtime_default(self): + """test 
MockTime.localtime, mock_time.localtime and mock_localtime without an argument""" + with self.assertRaises(NotImplementedError) as context: + self.mock_control.localtime() + self._verify_localtime_default(context, "instance localtime") + self._log_handler.log_records.clear() + with self.assertRaises(NotImplementedError) as context: + mock_time.localtime() + self._verify_localtime_default(context, "mock_time.localtime") + self._log_handler.log_records.clear() + with self.assertRaises(NotImplementedError) as context: + mock_localtime() + self._verify_localtime_default(context, "mock_localtime") + + def _verify_localtime_default(self, context: RaisesContext, label: str) -> None: + """Consistent check of localtime result.""" + exc_data = get_context_exception(context) + self.assertEqual(repr(exc_data), repr(MOCKED_TIME_NOT_LOCALTIME_EX)) + self.assertEqual( + self._log_handler.to_tuple(), + (mock_info(MOCKED_TIME_LOCALTIME_MSG, (None,)),), + f"{label} log records should match expected; got:\n{self._log_handler.to_tuple()}", + ) + + def test_localtime_with_arg(self): + """test MockTime.localtime, mock_time.localtime and mock_localtime with an argument""" + # datetime(2000, 1, 1, 0, 0, 0, tzinfo=timezone.utc) = 946684800.0 + # time.localtime(946684800) = time.struct_time(tm_year=1999, tm_mon=12, tm_mday=31, + # tm_hour=17, tm_min=0, tm_sec=0, tm_wday=4, tm_yday=365, tm_isdst=0) + case = 946684800 # 2000-01-01 00:00:00 UTC + actual = self.mock_control.localtime(case) + self._verify_time_struct_fields("instance localtime", case, actual) + self._log_handler.log_records.clear() + actual = mock_time.localtime(case) + self._verify_time_struct_fields("mock_time.localtime", case, actual) + self._log_handler.log_records.clear() + actual = mock_localtime(case) + self._verify_time_struct_fields("mock_localtime", case, actual) + + def _verify_time_struct_fields( + self, context: str, case: int, actual: time.struct_time + ) -> None: + """Common field value verifications that are 
repeated a couple of times.""" + self.assertIsInstance( + actual, + time.struct_time, + f"localtime should return a time.struct_time, got {type(actual)}", + ) + self.assertEqual( + actual.tm_year, 2000, f"localtime year should be 2000, got {actual.tm_year}" + ) + self.assertEqual( + actual.tm_mon, 1, f"localtime month should be 1, got {actual.tm_mon}" + ) + self.assertEqual( + actual.tm_mday, 1, f"localtime day should be 1, got {actual.tm_mday}" + ) + self.assertEqual( + actual.tm_hour, 0, f"localtime hour should be 0, got {actual.tm_hour}" + ) + self.assertEqual( + actual.tm_min, 0, f"localtime minute should be 0, got {actual.tm_min}" + ) + self.assertEqual( + actual.tm_sec, 0, f"localtime second should be 0, got {actual.tm_sec}" + ) + self.assertEqual( + actual.tm_yday, + 1, + f"localtime day of year should be 1, got {actual.tm_yday}", + ) + self.assertEqual( + actual.tm_wday, 5, f"localtime weekday should be 5, got {actual.tm_wday}" + ) + expected = 0 if sys.implementation.name == "cpython" else -1 + self.assertEqual( + actual.tm_isdst, + expected, + f"localtime is dst should be {expected}, got {actual.tm_isdst}", + ) + iso = format_ns_as_iso_timestamp(case * 1_000_000_000) + expected = "2000-01-01T00:00:00" + self.assertEqual(iso, expected, f"utc iso should be {expected}, got {iso}") + self.assertEqual( + self._log_handler.to_tuple(), + (mock_info(MOCKED_TIME_LOCALTIME_MSG, (case,)),), + f"{context} log records should match expected; got:\n{self._log_handler.to_tuple()}", + ) + + +# end class TestMockTimeFunction() + + +def _setup_mock_log_capture(context: unittest.TestCase) -> None: + """ + Set up log capture for the test context. 
+ + :param context: unittest.TestCase: test context + """ + # pylint:disable=protected-access + context._log_handler = ListHandler() + context.mogger: logging.Logger = setup_logger( + MOCK_LOGGER + ) # type:ignore # mocking logger + context._log_handler.log_only_to_me(context.mogger) + + +def _teardown_mock_log_capture(context: unittest.TestCase) -> None: + """ + Clean up log capture for the test context. + + :param context: unittest.TestCase: test context + """ + # pylint:disable=protected-access + assert isinstance(context._log_handler, ListHandler) + context.mogger.removeHandler(context._log_handler) + + +def _reset_mock_log_capture(context: unittest.TestCase) -> None: + """ + Reset log capture for the test context. + + :param context: unittest.TestCase: test context + """ + # pylint:disable=protected-access + context.mogger.setLevel(logging.DEBUG) # pylint:disable=no-member + context._log_handler.log_records.clear() + + +if __name__ == "__main__": + unittest.main() + +mock_cleanup() diff --git a/tests/test_ntp.py b/tests/test_ntp.py new file mode 100644 index 0000000..4f89cef --- /dev/null +++ b/tests/test_ntp.py @@ -0,0 +1,971 @@ +# SPDX-FileCopyrightText: 2024 H PHil Duby +# +# SPDX-License-Identifier: MIT + +""" +Unittests for adafruit_ntp +""" + +import unittest +import adafruit_logging as logging +from tests.mocks.mock_pool import MockPool, MockCallback, MockSocket +from tests.mocks.mock_time import MockTime +from tests.shared_for_testing import ( + ListHandler, + setup_logger, + set_utc_timezone, + mock_cleanup, + MOCK_LOGGER, + NS_PER_SEC, + MOCKED_TIME_DEFAULT_START_NS, + GETADDR_DNS_EX, +) + +# ntp_testing_support overrides sys.modules['time']. Make sure anything that needs +# the builtin time module is imported first. 
+from tests.ntp_testing_support import ( + BaseNTPTest, + mock_time, + logged_for_time_reference, + set_ntp_state, + DEFAULT_NTP_STATE, + INCOMPLETE_EX, + SENDTO_BROKENPIPE_EX, +) + +# Import the adafruit_ntp module components, which should import the mock versions of modules +# defined above, unless they are injected on instantiation. +from adafruit_ntp import NTP, EventType # pylint:disable=wrong-import-position + + +class TestNTPFunction(BaseNTPTest): # pylint:disable=too-many-public-methods + """Test cases for adafruit_ntp module functionality. + + Most tests needs to be done for 4 different scenario combinations: + - blocking or non-blocking + - with and without notification callbacks + With 'white box' testing, the notifications could just always be included. With or + without notification does not alter the flow of the code. Best to test both ways anyway, + to not rely on that. + + Once tests are done to verify that the requested notifications are working, all other + tests that include notifications can use a single 'all events' notification. The log + messages from that will show (log) all triggered events. 
+ """ + + # Constants + # Testing scenarios blocking | notifications + LEGACY: str = "legacy" # yes no + MONITORING: str = "monitoring" # yes yes + NON_BLOCKING: str = "non-blocking" # no no + MON_NON_BLOCKING: str = "monitored non-blocking" # no yes + + @classmethod + def setUpClass(cls): + """Set up class-level resources.""" + set_utc_timezone() + cls.mock_time = MockTime() + cls.mock_pool = MockPool() + cls.mock_socket = cls.mock_pool.socket( + cls.mock_pool.AF_INET, cls.mock_pool.SOCK_DGRAM + ) + cls.mogger: logging.Logger = setup_logger(MOCK_LOGGER) + # py lint:disable=protected-access + cls._log_handler = ListHandler() + cls._log_handler.log_only_to_me(cls.mogger) + + @classmethod + def tearDownClass(cls): + """Get rid of the list test specific handler.""" + cls.mogger.removeHandler(cls._log_handler) + mock_time.clear_mock_singleton() + MockPool.clear_mock_singleton() + MockSocket.clear_mock_singleton() + + def setUp(self): + """Common initialization for each test method.""" + self.mock_pool.mock_getaddrinfo_attempts.clear() + self.mock_socket.mock_recv_into_attempts.clear() + start = DEFAULT_NTP_STATE.copy() + start.monotonic_ns = MOCKED_TIME_DEFAULT_START_NS + self.ntp = NTP(self.mock_pool) + set_ntp_state(start, self.ntp) + self.mogger.setLevel(logging.INFO) # pylint:disable=no-member + self._log_handler.log_records.clear() + + def tearDown(self): + """Clean up after each test.""" + + # @ unittest.skip('trace changed pattern') + def test_legacy_no_cache_dns_fail(self): # pylint:disable=invalid-name + """ + Test failing legacy NTP DNS address lookup without previous cached offset. + (blocking, no notification). 
+ """ + notification_context = None + self.mock_pool.set_getaddrinfo_responses([GETADDR_DNS_EX] * 10) + + expected_state = self._set_and_verify_ntp_state(DEFAULT_NTP_STATE) + initial_time = self._prime_expected_dns_fail_state(expected_state) + + for _ in range( + 10 + ): # Do enough iterations to get into the maximum rate limiting delay + self._dns_failure_cycle(expected_state, notification_context) + if not expected_state.blocking: + # need to advance time to get past each rate limiting delay + saved_ns = self.mock_time.get_mock_ns() + self.mock_time.set_mock_ns(expected_state.limit_end) + expected_state.monotonic_ns = expected_state.limit_end + if not expected_state.blocking: + # Undo the final time advance + self.mock_time.set_mock_ns(saved_ns) + expected_state.monotonic_ns = saved_ns + self._post_dns_failures_check(initial_time) + + # @ unittest.skip('trace changed pattern') + def test_monitoring_no_cache_dns_fail(self): # pylint:disable=invalid-name + """ + Test failing monitoring NTP DNS address lookup without previous cached offset. + (blocking, with notification). 
+ """ + configuration_scenario = self.MONITORING + notification_context = "all " + configuration_scenario + self.mock_pool.set_getaddrinfo_responses([GETADDR_DNS_EX] * 10) + test_cb = MockCallback(notification_context) + start = DEFAULT_NTP_STATE.copy() + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + + expected_state = self._set_and_verify_ntp_state(start) + initial_time = self._prime_expected_dns_fail_state(expected_state) + + for _ in range( + 10 + ): # Do enough iterations to get into the maximum rate limiting delay + self._dns_failure_cycle(expected_state, notification_context) + if not expected_state.blocking: + # need to advance time to get past each rate limiting delay + saved_ns = self.mock_time.get_mock_ns() + self.mock_time.set_mock_ns(expected_state.limit_end) + expected_state.monotonic_ns = expected_state.limit_end + if not expected_state.blocking: + # Undo the final time advance + self.mock_time.set_mock_ns(saved_ns) + expected_state.monotonic_ns = saved_ns + self._post_dns_failures_check(initial_time) + + # @ unittest.skip('trace changed pattern') + def test_non_blocking_no_cache_dns_fail( # pylint:disable=invalid-name + self, + ) -> None: + """ + Test failing non-blocking NTP DNS address lookup without previous cached offset. + (non-blocking, no notification). + """ + notification_context = None + self.mock_pool.set_getaddrinfo_responses([GETADDR_DNS_EX] * 10) + start = DEFAULT_NTP_STATE.copy() + start.blocking = False + self._iterate_dns_failure(10, start, notification_context) + + # @ unittest.skip('trace changed pattern') + def test_mon_non_blocking_no_cache_dns_fail( # pylint:disable=invalid-name + self, + ) -> None: + """ + Test failing mon-non-blocking NTP DNS address lookup without previous cached offset. + (non-blocking, with notification). 
+ """ + configuration_scenario = self.MON_NON_BLOCKING + notification_context = "all " + configuration_scenario + self.mock_pool.set_getaddrinfo_responses([GETADDR_DNS_EX] * 10) + test_cb = MockCallback(notification_context) + start = DEFAULT_NTP_STATE.copy() + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + start.blocking = False + self._iterate_dns_failure(10, start, notification_context) + + # @ unittest.skip('trace changed pattern') + def test_rate_limiting_boundaries(self): # pylint:disable=invalid-name + """Test rate limiting delay interval boundary conditions.""" + # For context in this test. The actual values are constants used in the called methods + # that were created to DRY the code. + # configuration_scenario = self.NON_BLOCKING + # notification_context = None + # exception = INCOMPLETE_EX + + # 2 operations should attempt to get address information, because they are past the + # end of the rate limiting delay interval. + self.mock_pool.set_getaddrinfo_responses([GETADDR_DNS_EX] * 2) + + start = DEFAULT_NTP_STATE.copy() + start.blocking = False + start.monotonic_ns = NS_PER_SEC + 3 + start.limit_delay = int(self.DNS_BACKOFF.first * self.DNS_BACKOFF.factor) + start.limit_end = self.DNS_BACKOFF.first + start.monotonic_ns + start.state = self.NTP_STATE_MACHINE.GETTING_SOCKET + + self._before_boundary_check(start, start.monotonic_ns) + + # Still in the rate limiting delay period. + self._before_boundary_check(start, start.limit_end - 3) + + # Should be just past the end of the rate limiting delay. + self._past_boundary_check(start, start.limit_end - 2) + + # Well past the end of the rate limiting delay. 
+ self._past_boundary_check(start, start.limit_end + 2000) + + self.assertEqual( + len(self.mock_pool.mock_getaddrinfo_attempts), + 0, + "rate limiting boundary checks should have used all of the queued getaddr responses: " + f"found {len(self.mock_pool.mock_getaddrinfo_attempts)} left.", + ) + + # @ unittest.skip('trace changed pattern') + def test_legacy_no_cache_get_dns(self): # pylint:disable=invalid-name + """ + Test successful legacy DNS address lookup without previous cached offset. + (blocking, no notification). + Fail on the immediately attempted NTP packet request. + """ + # configuration_scenario = self.LEGACY + # notification_context = None + expected_state, expected_log = self._expected_for_any_get_dns( + DEFAULT_NTP_STATE, None + ) + self._fail_ntp_operation_and_check_result( + INCOMPLETE_EX, expected_state, expected_log + ) + + # @ unittest.skip('trace changed pattern') + def test_monitoring_no_cache_get_dns(self): # pylint:disable=invalid-name + """ + Test successful monitoring DNS address lookup without previous cached offset. + (blocking, with notification). + Fail on the immediately attempted NTP packet request. + """ + # configuration_scenario = self.MONITORING + # DNS lookup succeeds, but does not generate any notifications. The Following NTP + # packet request is set to fail, and that generates a notification. + notification_context = "fail " + self.MONITORING + test_cb = MockCallback(notification_context) + start = DEFAULT_NTP_STATE.copy() + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + expected_state, expected_log = self._expected_for_any_get_dns( + start, notification_context + ) + self._fail_ntp_operation_and_check_result( + INCOMPLETE_EX, expected_state, expected_log + ) + + # @ unittest.skip('trace changed pattern') + def test_non_blocking_no_cache_get_dns(self): # pylint:disable=invalid-name + """ + Test successful non-blocking DNS address lookup without previous cached offset. + (not blocking, no notification). 
+ """ + # configuration_scenario = self.NON_BLOCKING + # notification_context = None + start = DEFAULT_NTP_STATE.copy() + start.blocking = False + expected_state, expected_log = self._expected_for_any_get_dns(start, None) + self._fail_ntp_operation_and_check_result( + INCOMPLETE_EX, expected_state, expected_log + ) + + # @ unittest.skip('trace changed pattern') + def test_mon_non_blocking_no_cache_get_dns(self): # pylint:disable=invalid-name + """ + Test successful monitored non-blocking DNS address lookup without previous cached offset. + (not blocking, with notification). + """ + # configuration_scenario = self.MON_NON_BLOCKING + # No actual notifications are generated for this operation and scenario. + notification_context = "good " + self.MON_NON_BLOCKING + test_cb = MockCallback(notification_context) + start = DEFAULT_NTP_STATE.copy() + start.blocking = False + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + expected_state, expected_log = self._expected_for_any_get_dns( + start, notification_context + ) + self._fail_ntp_operation_and_check_result( + INCOMPLETE_EX, expected_state, expected_log + ) + + # @ unittest.skip('trace changed pattern') + def test_legacy_no_cache_send_fail(self): # pylint:disable=invalid-name + """ + Test failing legacy NTP packet send without previous cached offset. + (blocking, no notification). + + This should only occur if the network connection drops between getting the DNS + lookup and sending the NTP packet. 
+ """ + # configuration_scenario = self.LEGACY + # notification_context = None + start = DEFAULT_NTP_STATE.copy() + expected_state, should_log, _ = self._configure_send_fail(start, None) + expected_log = tuple(should_log) + self._fail_ntp_operation_and_check_result( + SENDTO_BROKENPIPE_EX, expected_state, expected_log + ) + + # Should honour retry backoff rate limiting for next attempt (using sleep) + self._log_handler.log_records.clear() + should_log, _ = self._configure_send_retry_fail(expected_state, None) + expected_log = tuple(should_log) + self._fail_ntp_operation_and_check_result( + SENDTO_BROKENPIPE_EX, expected_state, expected_log + ) + + # @ unittest.skip('trace changed pattern') + def test_monitoring_no_cache_send_fail(self): # pylint:disable=invalid-name + """ + Test failing monitoring NTP packet send without previous cached offset. + (blocking, with notification). + + This should only occur if the network connection drops between getting the DNS + lookup and sending the NTP packet. + """ + # configuration_scenario = self.MONITORING + notification_context = "fail " + self.MONITORING + test_cb = MockCallback(notification_context) + start = DEFAULT_NTP_STATE.copy() + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + expected_state, should_log, _ = self._configure_send_fail( + start, notification_context + ) + expected_log = tuple(should_log) + self._fail_ntp_operation_and_check_result( + SENDTO_BROKENPIPE_EX, expected_state, expected_log + ) + + # @ unittest.skip('trace changed pattern') + def test_non_blocking_no_cache_send_fail(self): # pylint:disable=invalid-name + """ + Test failing non-blocking NTP packet send without previous cached offset. + (non-blocking, no notification). + + This should only occur if the network connection drops between getting the DNS + lookup and sending the NTP packet. 
+ """ + # configuration_scenario = self.NON_BLOCKING + # notification_context = None + start = DEFAULT_NTP_STATE.copy() + start.blocking = False + expected_state, should_log, _ = self._configure_send_fail(start, None) + expected_log = tuple(should_log) + self._fail_ntp_operation_and_check_result( + INCOMPLETE_EX, expected_state, expected_log + ) + + # @ unittest.skip('trace changed pattern') + def test_mon_non_blocking_no_cache_send_fail(self): # pylint:disable=invalid-name + """ + Test failing monitoring non-blocking NTP packet send without previous cached offset. + (non-blocking, with notification). + + This should only occur if the network connection drops between getting the DNS + lookup and sending the NTP packet. + """ + # configuration_scenario = self.MON_NON_BLOCKING + notification_context = "fail " + self.MON_NON_BLOCKING + test_cb = MockCallback(notification_context) + start = DEFAULT_NTP_STATE.copy() + start.blocking = False + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + expected_state, should_log, _ = self._configure_send_fail( + start, notification_context + ) + expected_log = tuple(should_log) + self._fail_ntp_operation_and_check_result( + INCOMPLETE_EX, expected_state, expected_log + ) + + # @ unittest.skip('trace changed pattern') + def test_legacy_no_cache_ntp_fail(self): # pylint:disable=invalid-name + """ + Test failing legacy NTP packet request without previous cached offset. + (blocking, no notification). + """ + # configuration_scenario = self.LEGACY + # notification_context = None + start = DEFAULT_NTP_STATE.copy() + expected_state, should_log, _ = self._configure_ntp_fail(start, None) + expected_log = tuple(should_log) + self._fail_ntp_operation_and_check_result( + INCOMPLETE_EX, expected_state, expected_log + ) + + # @ unittest.skip('trace changed pattern') + def test_monitoring_no_cache_ntp_fail(self): # pylint:disable=invalid-name + """ + Test failing monitoring NTP packet request without previous cached offset. 
+ (blocking, with notification). + """ + # configuration_scenario = self.MONITORING + notification_context = "fail " + self.MONITORING + test_cb = MockCallback(notification_context) + start = DEFAULT_NTP_STATE.copy() + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + expected_state, should_log, _ = self._configure_ntp_fail( + start, notification_context + ) + expected_log = tuple(should_log) + self._fail_ntp_operation_and_check_result( + INCOMPLETE_EX, expected_state, expected_log + ) + + # @ unittest.skip('trace changed pattern') + def test_non_blocking_no_cache_ntp_fail(self): # pylint:disable=invalid-name + """ + Test failing non-blocking NTP packet request without previous cached offset. + (non-blocking, no notification). + """ + # configuration_scenario = self.NON_BLOCKING + # notification_context = None + start = DEFAULT_NTP_STATE.copy() + start.blocking = False + expected_state, should_log, _ = self._configure_ntp_fail(start, None) + expected_log = tuple(should_log) + self._fail_ntp_operation_and_check_result( + INCOMPLETE_EX, expected_state, expected_log + ) + + # @ unittest.skip('trace changed pattern') + def test_mon_non_blocking_no_cache_ntp_fail(self): # pylint:disable=invalid-name + """ + Test failing monitoring non-blocking NTP packet request without previous cached offset. + (non-blocking, with notification). 
+ """ + # configuration_scenario = self.MON_NON_BLOCKING + notification_context = "fail " + self.MON_NON_BLOCKING + start = DEFAULT_NTP_STATE.copy() + start.blocking = False + test_cb = MockCallback(notification_context) + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + expected_state, should_log, _ = self._configure_ntp_fail( + start, notification_context + ) + expected_log = tuple(should_log) + self._fail_ntp_operation_and_check_result( + INCOMPLETE_EX, expected_state, expected_log + ) + + # @ unittest.skip('trace changed pattern') + def test_legacy_no_cache_get_ntp(self): # pylint:disable=invalid-name + """ + Test successful legacy NTP packet request without previous cached offset. + (blocking, no notification). + """ + # configuration_scenario = self.LEGACY + # notification_context = None + start = DEFAULT_NTP_STATE.copy() + expected_state, should_log = self._configure_good_ntp(start, None) + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_monitoring_no_cache_get_ntp(self): # pylint:disable=invalid-name + """ + Test successful monitoring NTP packet request without previous cached offset. + (blocking, with notification). + """ + # configuration_scenario = self.MONITORING + notification_context = "good " + self.MONITORING + start = DEFAULT_NTP_STATE.copy() + test_cb = MockCallback(notification_context) + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + expected_state, should_log = self._configure_good_ntp( + start, notification_context + ) + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_non_blocking_no_cache_get_ntp(self): # pylint:disable=invalid-name + """ + Test successful non-blocking NTP packet request without previous cached offset. + (non-blocking, no notification). 
+ """ + # configuration_scenario = self.NON_BLOCKING + # notification_context = None + start = DEFAULT_NTP_STATE.copy() + start.blocking = False + expected_state, should_log = self._configure_good_ntp(start, None) + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_mon_non_blocking_no_cache_get_ntp(self): # pylint:disable=invalid-name + """ + Test successful monitored non-blocking NTP packet request without previous cached offset. + (non-blocking, with notification). + """ + # configuration_scenario = self.MON_NON_BLOCKING + notification_context = "good " + self.MON_NON_BLOCKING + test_cb = MockCallback(notification_context) + start = DEFAULT_NTP_STATE.copy() + start.blocking = False + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + expected_state, should_log = self._configure_good_ntp( + start, notification_context + ) + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_legacy_cached_utc(self): # pylint:disable=invalid-name + """ + Test legacy get utc time with existing cached offset. + (blocking, no notification). + """ + # configuration_scenario = self.LEGACY + # notification_context = None + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + self._cached_ntp_operation_and_check_results(start) + + # @ unittest.skip('trace changed pattern') + def test_monitoring_cached_utc(self): # pylint:disable=invalid-name + """ + Test monitoring get utc time with existing cached offset. + (blocking, with notification). 
+ """ + # configuration_scenario = self.MONITORING + notification_context = "good " + self.MONITORING + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + test_cb = MockCallback(notification_context) + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + self._cached_ntp_operation_and_check_results(start) + + # @ unittest.skip('trace changed pattern') + def test_non_blocking_cached_utc(self): # pylint:disable=invalid-name + """ + Test non-blocking get utc time with existing cached offset. + (non-blocking, no notification). + """ + # configuration_scenario = self.NON_BLOCKING + # notification_context = None + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.blocking = False + self._cached_ntp_operation_and_check_results(start) + + # @ unittest.skip('trace changed pattern') + def test_mon_non_block_cached_utc(self): # pylint:disable=invalid-name + """ + Test monitoring get utc time with existing cached offset. + (non-blocking, with notification). + """ + # configuration_scenario = self.MONITORING + notification_context = "good " + self.MONITORING + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.blocking = False + test_cb = MockCallback(notification_context) + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + self._cached_ntp_operation_and_check_results(start) + + # @ unittest.skip('trace changed pattern') + def test_legacy_cached_dns_fail(self): # pylint:disable=invalid-name + """ + Test failing legacy NTP DNS address lookup with existing cached offset. + (blocking, no notification). + + Base configuration is the same as the cached utc tests, adjusted so that the cached + offset has expired. 
+ """ + # configuration_scenario = self.LEGACY + # notification_context = None + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + self.mock_pool.set_getaddrinfo_responses([GETADDR_DNS_EX]) + self._fallback_dns_fail(start, None) + + # @ unittest.skip('trace changed pattern') + def test_monitor_cached_dns_fail(self): # pylint:disable=invalid-name + """ + Test failing monitoring NTP DNS address lookup with existing cached offset. + (blocking, with notification). + + Base configuration is the same as the cached utc tests, adjusted so that the cached + offset has expired. + """ + # configuration_scenario = self.MONITORING + notification_context = "good " + self.MONITORING + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + test_cb = MockCallback(notification_context) + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + self.mock_pool.set_getaddrinfo_responses([GETADDR_DNS_EX]) + self._fallback_dns_fail(start, notification_context) + + # @ unittest.skip('trace changed pattern') + def test_non_blocking_cached_dns_fail(self): # pylint:disable=invalid-name + """ + Test failing non-blocking NTP DNS address lookup with existing cached offset. + (not blocking, no notification). + + Base configuration is the same as the cached utc tests, adjusted so that the cached + offset has expired. + """ + # configuration_scenario = self.NON_BLOCKING + # notification_context = None + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.blocking = False + self.mock_pool.set_getaddrinfo_responses([GETADDR_DNS_EX]) + self._fallback_dns_fail(start, None) + + # @ unittest.skip('trace changed pattern') + def test_mon_non_blocking_cached_dns_fail(self): # pylint:disable=invalid-name + """ + Test failing mon_non-blocking NTP DNS address lookup with existing cached offset. + (not blocking, with notification). 
+ + Base configuration is the same as the cached utc tests, adjusted so that the cached + offset has expired. + """ + # configuration_scenario = self.MONITORING + notification_context = "good " + self.MON_NON_BLOCKING + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.blocking = False + test_cb = MockCallback(notification_context) + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + self.mock_pool.set_getaddrinfo_responses([GETADDR_DNS_EX]) + self._fallback_dns_fail(start, notification_context) + + # @ unittest.skip('trace changed pattern') + def test_legacy_cached_get_dns(self): # pylint:disable=invalid-name + """ + Test legacy get DNS address lookup with existing cached offset. + (blocking, no notification). + + Base configuration is the same as the cached utc tests, adjusted so that the cached + offset has expired. + + Since this is blocking mode, the successful dns lookup immediately continues to + attempt to get the NTP packet. That is set to fail for this test. + """ + # configuration_scenario = self.LEGACY + # notification_context = None + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.monotonic_ns = start.next_sync # Earliest time that cache will be expired + expected_state, expected_log = self._expected_for_any_get_dns(start, None) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_monitoring_cached_get_dns(self): # pylint:disable=invalid-name + """ + Test monitoring get DNS address lookup with existing cached offset. + (blocking, with notification). + + Base configuration is the same as the cached utc tests, adjusted so that the cached + offset has expired. + + Since this is blocking mode, the successful dns lookup immediately continues to + attempt to get the NTP packet. That is set to fail for this test. 
+ """ + # configuration_scenario = self.MONITORING + # DNS lookup succeeds, but does not generate any notifications. The Following NTP + # packet request is set to fail, and that generates a notification. + notification_context = "fail " + self.MONITORING + test_cb = MockCallback(notification_context) + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + start.monotonic_ns = start.next_sync # Earliest time that cache will be expired + expected_state, expected_log = self._expected_for_any_get_dns( + start, notification_context + ) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_non_blocking_cached_get_dns(self): # pylint:disable=invalid-name + """ + Test non-blocking get DNS address lookup with existing cached offset. + (not blocking, no notification). + + Base configuration is the same as the cached utc tests, adjusted so that the cached + offset has expired. + """ + # configuration_scenario = self.NON_BLOCKING + # No actual notifications are generated for this operation and scenario. + notification_context = "good " + self.MON_NON_BLOCKING + test_cb = MockCallback(notification_context) + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.blocking = False + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + start.monotonic_ns = start.next_sync # Earliest time that cache will be expired + expected_state, expected_log = self._expected_for_any_get_dns(start, None) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_mon_non_blocking_cached_get_dns(self): # pylint:disable=invalid-name + """ + Test monitored non-blocking get DNS address lookup with existing cached offset. + (not blocking, with notification). 
+ + Base configuration is the same as the cached utc tests, adjusted so that the cached + offset has expired. + """ + # configuration_scenario = self.NON_BLOCKING + # notification_context = None + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.blocking = False + start.monotonic_ns = start.next_sync # Earliest time that cache will be expired + expected_state, expected_log = self._expected_for_any_get_dns(start, None) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_legacy_cached_send_fail(self): # pylint:disable=invalid-name + """ + Test failing legacy NTP packet send with existing cached offset. + (blocking, no notification). + """ + # configuration_scenario = self.LEGACY + # notification_context = None + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.monotonic_ns = start.next_sync + 100 # some time after got dns + expected_state, should_log, next_ns = self._configure_send_fail(start, None) + next_ns = logged_for_time_reference(should_log, next_ns, False) + expected_state.monotonic_ns = next_ns - 1 + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # Should honour retry backoff rate limiting for next attempt (using sleep) + + # @ unittest.skip('trace changed pattern') + def test_monitoring_cached_send_fail(self): # pylint:disable=invalid-name + """ + Test failing monitoring NTP packet send with existing cached offset. + (blocking, with notification). 
+ """ + # configuration_scenario = self.MONITORING + notification_context = "fail " + self.MONITORING + test_cb = MockCallback(notification_context) + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + start.monotonic_ns = start.next_sync + 100 # some time after got dns + expected_state, should_log, next_ns = self._configure_send_fail( + start, notification_context + ) + next_ns = logged_for_time_reference(should_log, next_ns, False) + expected_state.monotonic_ns = next_ns - 1 + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_non_blocking_cached_send_fail(self): # pylint:disable=invalid-name + """ + Test failing non-blocking NTP packet send with existing cached offset. + (non-blocking, no notification). + """ + # configuration_scenario = self.NON_BLOCKING + # notification_context = None + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.blocking = False + start.monotonic_ns = start.next_sync + 100 # some time after got dns + expected_state, should_log, next_ns = self._configure_send_fail(start, None) + # expected_state.monotonic_ns = next_ns + next_ns = logged_for_time_reference(should_log, next_ns, False) + expected_state.monotonic_ns = next_ns - 1 + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_mon_non_blocking_cached_send_fail(self): # pylint:disable=invalid-name + """ + Test failing monitoring non-blocking NTP packet send with existing cached offset. + (non-blocking, with notification). 
+ """ + # configuration_scenario = self.MON_NON_BLOCKING + notification_context = "fail " + self.MON_NON_BLOCKING + test_cb = MockCallback(notification_context) + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.blocking = False + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + start.monotonic_ns = start.next_sync + 100 # some time after got dns + expected_state, should_log, next_ns = self._configure_send_fail( + start, notification_context + ) + next_ns = logged_for_time_reference(should_log, next_ns, False) + expected_state.monotonic_ns = next_ns - 1 + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_legacy_cached_ntp_fail(self): # pylint:disable=invalid-name + """ + Test failing legacy NTP packet request with existing cached offset. + (blocking, no notification). + """ + # configuration_scenario = self.LEGACY + # notification_context = None + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.monotonic_ns = start.next_sync + 100 # some time after got dns + expected_state, should_log, next_ns = self._configure_ntp_fail(start, None) + next_ns = logged_for_time_reference(should_log, next_ns, False) + expected_state.monotonic_ns = next_ns - 1 + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_monitoring_cached_ntp_fail(self): # pylint:disable=invalid-name + """ + Test failing monitoring NTP packet request with existing cached offset. + (blocking, with notification). 
+ """ + # configuration_scenario = self.MONITORING + notification_context = "fail " + self.MONITORING + test_cb = MockCallback(notification_context) + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + start.monotonic_ns = start.next_sync + 100 # some time after got dns + expected_state, should_log, next_ns = self._configure_ntp_fail( + start, notification_context + ) + # if start.monotonic_start_ns > 0: + # # When have previous cached offset, use it + next_ns = logged_for_time_reference(should_log, next_ns, False) + expected_state.monotonic_ns = next_ns - 1 + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_non_blocking_cached_ntp_fail(self): # pylint:disable=invalid-name + """ + Test failing non-blocking NTP packet request with existing cached offset. + (non-blocking, no notification). + """ + # configuration_scenario = self.NON_BLOCKING + # notification_context = None + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.blocking = False + start.monotonic_ns = start.next_sync + 100 # some time after got dns + expected_state, should_log, next_ns = self._configure_ntp_fail(start, None) + # if start.monotonic_start_ns > 0: + # # When have previous cached offset, use it + next_ns = logged_for_time_reference(should_log, next_ns, False) + expected_state.monotonic_ns = next_ns - 1 + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_mon_non_blocking_cached_ntp_fail(self): # pylint:disable=invalid-name + """ + Test failing monitoring non-blocking NTP packet request with existing cached offset. + (non-blocking, with notification). 
+ """ + # configuration_scenario = self.MON_NON_BLOCKING + notification_context = "fail " + self.MON_NON_BLOCKING + test_cb = MockCallback(notification_context) + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + start.blocking = False + start.monotonic_ns = start.next_sync + 100 # some time after got dns + expected_state, should_log, next_ns = self._configure_ntp_fail( + start, notification_context + ) + # if start.monotonic_start_ns > 0: + # # When have previous cached offset, use it + next_ns = logged_for_time_reference(should_log, next_ns, False) + expected_state.monotonic_ns = next_ns - 1 + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_legacy_cached_get_ntp(self): # pylint:disable=invalid-name + """ + Test successful legacy NTP packet request with existing cached offset. + (blocking, no notification). + """ + # configuration_scenario = self.LEGACY + # notification_context = None + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.monotonic_ns = start.next_sync + 100 # some time after got dns + expected_state, should_log = self._configure_good_ntp(start, None) + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_monitoring_cached_get_ntp(self): # pylint:disable=invalid-name + """ + Test successful monitoring NTP packet request with existing cached offset. + (blocking, with notification). 
+ """ + # configuration_scenario = self.MONITORING + notification_context = "good " + self.MONITORING + test_cb = MockCallback(notification_context) + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + start.monotonic_ns = start.next_sync + 100 # some time after got dns + expected_state, should_log = self._configure_good_ntp( + start, notification_context + ) + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_non_blocking_cached_get_ntp(self): # pylint:disable=invalid-name + """ + Test successful non-blocking NTP packet request with existing cached offset. + (non-blocking, no notification). + """ + # configuration_scenario = self.NON_BLOCKING + # notification_context = None + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.blocking = False + start.monotonic_ns = start.next_sync + 100 # some time after got dns + expected_state, should_log = self._configure_good_ntp(start, None) + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + # @ unittest.skip('trace changed pattern') + def test_mon_non_blocking_cached_get_ntp(self): # pylint:disable=invalid-name + """ + Test successful monitored non-blocking NTP packet request with existing cached offset. + (blocking, with notification). 
+ """ + # configuration_scenario = self.MON_NON_BLOCKING + notification_context = "good " + self.MON_NON_BLOCKING + test_cb = MockCallback(notification_context) + ntp_base_iso = "2024-01-01T09:11:22.456789123" + start = self._configure_cached_ntp(ntp_base_iso) + start.blocking = False + start.callbacks[test_cb.mock_callback] = EventType.ALL_EVENTS + start.monotonic_ns = start.next_sync + 100 # some time after got dns + expected_state, should_log = self._configure_good_ntp( + start, notification_context + ) + expected_log = tuple(should_log) + self._good_ntp_operation_and_check_result(expected_state, expected_log) + + +# end class TestNTPFunction(): + + +if __name__ == "__main__": + unittest.main() + +mock_cleanup() diff --git a/tests/test_ntp_instantiation.py b/tests/test_ntp_instantiation.py new file mode 100644 index 0000000..379f240 --- /dev/null +++ b/tests/test_ntp_instantiation.py @@ -0,0 +1,190 @@ +# SPDX-FileCopyrightText: 2024 H PHil Duby +# +# SPDX-License-Identifier: MIT + +""" +Unittests for instantiation of NTP instances from adafruit_ntp +""" + +import unittest +from tests.shared_for_testing import ( + # set_utc_timezone, + DEFAULT_NTP_ADDRESS, + NS_PER_SEC, +) +from tests.mocks.mock_pool import MockPool +from tests.shared_for_testing import set_utc_timezone, mock_cleanup + +# ntp_testing_support overrides sys.modules['time']. Make sure anything that needs +# the real time module is imported first. +from tests.ntp_testing_support import ( + NTP, + MockTime, + mock_time, + get_ntp_state, + match_expected_field_values, + DEFAULT_NTP_STATE, +) + + +class TestNTPInstantiation(unittest.TestCase): + """Test cases for constructing an NTP instance with various arguments. + + Other testing initializes the NTP instance using direct write to private properties. This + checks that those private properties are being set to the expected state by the constructor. 
+ """ + + @classmethod + def setUpClass(cls): + """Set up class-level resources.""" + set_utc_timezone() + cls.mock_pool = MockPool() + cls.mock_time = MockTime() + cls.mock_time.set_mock_ns(NS_PER_SEC) + + @classmethod + def tearDownClass(cls): + """Clear the singleton instances.""" + MockPool.clear_mock_singleton() + mock_time.clear_mock_singleton() + + def setUp(self): + """Common initialization for each test method.""" + # self.ntp: NTP = None + + def test_default_constructor(self): + """Test the default constructor.""" + ntp = NTP(self.mock_pool) + expected_state = DEFAULT_NTP_STATE.copy() + unmatched = match_expected_field_values(expected_state, get_ntp_state(ntp)) + self.assertEqual( + unmatched, set(), f"NTP instance fields {unmatched} do not match expected" + ) + + def test_alternate_server(self): + """Test constructing with an alternate server url.""" + alt_server = "ntp.example.com" + ntp = NTP(self.mock_pool, server=alt_server) + expected_state = DEFAULT_NTP_STATE.copy() + expected_state.server = alt_server + unmatched = match_expected_field_values(expected_state, get_ntp_state(ntp)) + self.assertEqual( + unmatched, set(), f"NTP instance fields {unmatched} do not match expected" + ) + + def test_alternate_port(self): + """Test constructing with an alternate port number.""" + alt_port = 9876 + ntp = NTP(self.mock_pool, port=alt_port) + expected_state = DEFAULT_NTP_STATE.copy() + expected_state.port = alt_port + unmatched = match_expected_field_values(expected_state, get_ntp_state(ntp)) + self.assertEqual( + unmatched, set(), f"NTP instance fields {unmatched} do not match expected" + ) + + def test_alternate_tz_offset(self): + """Test constructing with an alternate time zone offset.""" + + alt_tz_offset = -1.5 + ntp = NTP(self.mock_pool, tz_offset=alt_tz_offset) + expected_state = DEFAULT_NTP_STATE.copy() + expected_state.tz_offset_ns = int(alt_tz_offset * 60 * 60) * NS_PER_SEC + unmatched = match_expected_field_values(expected_state, get_ntp_state(ntp)) 
+ self.assertEqual( + unmatched, set(), f"NTP instance fields {unmatched} do not match expected" + ) + + def test_alternate_timeout(self): + """Test constructing with an alternate timeout value.""" + alt_timeout = 15 + ntp = NTP(self.mock_pool, socket_timeout=alt_timeout) + expected_state = DEFAULT_NTP_STATE.copy() + expected_state.socket_timeout = alt_timeout + unmatched = match_expected_field_values(expected_state, get_ntp_state(ntp)) + self.assertEqual( + unmatched, set(), f"NTP instance fields {unmatched} do not match expected" + ) + + def test_cache_minimum(self): + """Test constructing specify less than minimum cache duration.""" + min_cache = 60 * NS_PER_SEC + ntp = NTP(self.mock_pool, cache_seconds=55) + expected_state = DEFAULT_NTP_STATE.copy() + expected_state.cache_ns = min_cache + unmatched = match_expected_field_values(expected_state, get_ntp_state(ntp)) + self.assertEqual( + unmatched, set(), f"NTP instance fields {unmatched} do not match expected" + ) + + def test_alternate_cache(self): + """Test constructing with an alternate cache duration.""" + alt_cache = 355 * NS_PER_SEC + ntp = NTP(self.mock_pool, cache_seconds=alt_cache // NS_PER_SEC) + expected_state = DEFAULT_NTP_STATE.copy() + expected_state.cache_ns = alt_cache + unmatched = match_expected_field_values(expected_state, get_ntp_state(ntp)) + self.assertEqual( + unmatched, set(), f"NTP instance fields {unmatched} do not match expected" + ) + + def test_non_blocking(self): + """Test constructing in non-blocking mode.""" + ntp = NTP(self.mock_pool, blocking=False) + expected_state = DEFAULT_NTP_STATE.copy() + expected_state.blocking = False + unmatched = match_expected_field_values(expected_state, get_ntp_state(ntp)) + self.assertEqual( + unmatched, set(), f"NTP instance fields {unmatched} do not match expected" + ) + + def test_specified_defaults(self): + """Test constructing with specified default values.""" + ntp = NTP( + self.mock_pool, + server=DEFAULT_NTP_ADDRESS[0], + 
port=DEFAULT_NTP_ADDRESS[1], + tz_offset=0, + socket_timeout=10, + cache_seconds=60, + blocking=True, + ) + expected_state = DEFAULT_NTP_STATE.copy() + unmatched = match_expected_field_values(expected_state, get_ntp_state(ntp)) + self.assertEqual( + unmatched, set(), f"NTP instance fields {unmatched} do not match expected" + ) + + def test_specify_all_alternates(self): + """Test constructing with all alternate values.""" + alt_server = "ntp.example.com" + alt_port = 9876 + alt_tz_offset = -1.5 + alt_timeout = 15 + alt_cache = 355 * NS_PER_SEC + ntp = NTP( + self.mock_pool, + server=alt_server, + port=alt_port, + tz_offset=alt_tz_offset, + socket_timeout=alt_timeout, + cache_seconds=alt_cache // NS_PER_SEC, + blocking=False, + ) + expected_state = DEFAULT_NTP_STATE.copy() + expected_state.server = alt_server + expected_state.port = alt_port + expected_state.tz_offset_ns = int(alt_tz_offset * 60 * 60) * NS_PER_SEC + expected_state.socket_timeout = alt_timeout + expected_state.cache_ns = alt_cache + expected_state.blocking = False + unmatched = match_expected_field_values(expected_state, get_ntp_state(ntp)) + self.assertEqual( + unmatched, set(), f"NTP instance fields {unmatched} do not match expected" + ) + + +if __name__ == "__main__": + unittest.main() + +mock_cleanup() diff --git a/tests/test_ntp_registration.py b/tests/test_ntp_registration.py new file mode 100644 index 0000000..ac5d3c1 --- /dev/null +++ b/tests/test_ntp_registration.py @@ -0,0 +1,268 @@ +# SPDX-FileCopyrightText: 2024 H PHil Duby +# +# SPDX-License-Identifier: MIT + +""" +Unittests for registration functionality of NTP instances from adafruit_ntp +""" + +import unittest + +try: + from typing import Union +except ImportError: + pass +import adafruit_logging as logging +from tests.shared_for_testing import ( + ListHandler, + get_context_exception, + mock_info, + setup_logger, + mock_cleanup, + MOCK_LOGGER, + BAD_EVENT_MASK_MSG, + MOCKED_CALLBACK_MSG, +) +from tests.mocks.mock_pool import MockPool, 
MockCallback # , MockSocket + +# ntp_testing_support overrides sys.modules['time']. Make sure anything that needs +# the real time module is imported first. +from tests.ntp_testing_support import ( + NTP, + verify_generic_expected_state_and_log, + DEFAULT_NTP_STATE, +) +from adafruit_ntp import EventType, _IntFlag + + +class TestNTPRegistrations(unittest.TestCase): + """Test registration functionality of NTP instances.""" + + _log_handler: ListHandler = None + mogger: logging.Logger = None + + @classmethod + def setUpClass(cls): + """Set up class-level resources.""" + cls.mock_pool = MockPool() + cls.mogger: logging.Logger = setup_logger(MOCK_LOGGER) # mocking logger + cls._log_handler = ListHandler() + cls._log_handler.log_only_to_me(cls.mogger) + + @classmethod + def tearDownClass(cls): + """Clean up class-level resources.""" + cls.mogger.removeHandler(cls._log_handler) + MockPool.clear_mock_singleton() + + def setUp(self): + """Common initialization for each test method.""" + self.ntp = NTP(self.mock_pool) + self.mogger.setLevel(logging.INFO) # pylint:disable=no-member + self._log_handler.log_records.clear() + + def tearDown(self): + """Clean up after each test.""" + + def test_initialization(self): # pylint:disable=invalid-name + """Test NTP initialization.""" + self.assertIsNotNone(self.ntp) + expected_state = DEFAULT_NTP_STATE.copy() + verify_generic_expected_state_and_log( + self, + expected_state, + (), + "NTP state fields %s do not match the expected default", + "There should not be any log records generated during initialization; got: %s", + ) + + def test_no_notifications(self): # pylint:disable=invalid-name + """Test _notify_callbacks private method without any registered callbacks. + + The _notify_callbacks does not, and does not need to, verify that only expected events + are being notified. The calling code is responsible for making sure that only 'real' + events are being triggered. + """ + # Calling the private _notify_callbacks method directly. 
NTP instance state should + # not matter, and should not be changed. The Event(s) registered should not make a + # difference to that either. + expected_state = DEFAULT_NTP_STATE.copy() + # pylint:disable=protected-access + self.ntp._notify_ntp_event_callbacks(EventType.SYNC_COMPLETE, 1000) + self.ntp._notify_ntp_event_callbacks(EventType.SYNC_FAILED, 1001) + self.ntp._notify_ntp_event_callbacks(EventType.LOOKUP_FAILED, 1002) + self.ntp._notify_ntp_event_callbacks(EventType.NO_EVENT, 1003) + self.ntp._notify_ntp_event_callbacks(EventType.ALL_EVENTS, 1004) + self.ntp._notify_ntp_event_callbacks(127, 1005) + self.ntp._notify_ntp_event_callbacks(128, 1006) + # pylint:enable=protected-access + verify_generic_expected_state_and_log( + self, + expected_state, + (), + "NTP state fields %s do not match the expected default", + "No notification log records should be generated; got: %s", + ) + + def test_register_good_notification(self): # pylint:disable=invalid-name + """Test registering a callback for valid events.""" + context1 = "c1" + callback1 = MockCallback(context1) + self.ntp.register_ntp_event_callback( + callback1.mock_callback, EventType.SYNC_COMPLETE + ) + expected_state = DEFAULT_NTP_STATE.copy() + expected_state.callbacks[callback1.mock_callback] = ( + EventType.SYNC_COMPLETE.value + ) + verify_generic_expected_state_and_log( + self, + expected_state, + (), + "NTP(1) state fields %s do not match expected", + "Registering a callback should not add any log records; got: \n%s", + ) + + context2 = "c2" + callback2 = MockCallback(context2) + self.ntp.register_ntp_event_callback( + callback2.mock_callback, EventType.SYNC_FAILED + ) + expected_state.callbacks[callback2.mock_callback] = EventType.SYNC_FAILED + verify_generic_expected_state_and_log( + self, + expected_state, + (), + "NTP(2) state fields %s do not match expected", + "Registering callback2 should not add any log records; got: \n%s", + ) + + context3 = "c3" + callback3 = MockCallback(context3) + 
self.ntp.register_ntp_event_callback(callback3.mock_callback, 0b111) + expected_state.callbacks[callback3.mock_callback] = EventType.ALL_EVENTS + verify_generic_expected_state_and_log( + self, + expected_state, + (), + "NTP(3) state fields %s do not match expected", + "Registering callback3 should not add any log records; got: \n%s", + ) + + test_delay = 13579 + """Any legitimate unique delay amount, just to verify that the value is passed + to the notification.""" + + expected_log = ( + mock_info( + MOCKED_CALLBACK_MSG % (context1, EventType.SYNC_COMPLETE, test_delay) + ), + mock_info( + MOCKED_CALLBACK_MSG % (context3, EventType.SYNC_COMPLETE, test_delay) + ), + ) + # print(f'\n{len(self._log_handler.log_records) =}, {len(self._log_handler.to_tuple()) =}') + self.ntp._notify_ntp_event_callbacks( # pylint:disable=protected-access + EventType.SYNC_COMPLETE, test_delay + ) + # Triggering notifications should generate log records, but the order for a single event is + # not deterministic. When a single event results in multiple notifications, they could be + # in any order. Sort the log records for comparison purposes. 
+ self.assertEqual( + tuple(sorted(self._log_handler.to_tuple(), key=lambda x: x[3])), + expected_log, + "Sync complete notification should generate these log records:\n" + f"{expected_log}; got: \n{self._log_handler.to_tuple()}", + ) + self._log_handler.log_records.clear() + + test_delay = 15793 + """Any legitimate unique delay amount, just to verify that the value is passed + to the notification.""" + expected_log = ( + mock_info( + MOCKED_CALLBACK_MSG % (context3, EventType.LOOKUP_FAILED, test_delay) + ), + ) + self.ntp._notify_ntp_event_callbacks( # pylint:disable=protected-access + EventType.LOOKUP_FAILED, test_delay + ) + verify_generic_expected_state_and_log( + self, + expected_state, + expected_log, + "NTP(x) state fields %s changed unexpectedly", + "Lookup failed notification should generate this log record:\n%s; got: \n%s", + ) + self._log_handler.log_records.clear() + + test_delay = 79153 + """Any legitimate unique delay amount, just to verify that the value is passed + to the notification.""" + expected_log = ( + mock_info( + MOCKED_CALLBACK_MSG % (context2, EventType.SYNC_FAILED, test_delay) + ), + mock_info( + MOCKED_CALLBACK_MSG % (context3, EventType.SYNC_FAILED, test_delay) + ), + ) + self.ntp._notify_ntp_event_callbacks( # pylint:disable=protected-access + EventType.SYNC_FAILED, test_delay + ) + self.assertEqual( + tuple(sorted(self._log_handler.to_tuple(), key=lambda x: x[3])), + expected_log, + "Sync failed notification should generate these log records:\n" + f"{expected_log}; got: \n{self._log_handler.to_tuple()}", + ) + self._log_handler.log_records.clear() + + verify_generic_expected_state_and_log( + self, + expected_state, + (), + "NTP state fields %s do not match expected after notifications", + "Log records should have been cleared; got: \n%s", + ) + + def test_register_bad_int_notification(self): # pylint:disable=invalid-name + """Test registering a callback for an invalid event.""" + callback = MockCallback("bad1") + bad_mask = 
0b11111111 + self._verify_bad_notification(bad_mask, callback) + + def test_register_bad_intflag_notification2(self): # pylint:disable=invalid-name + """Test registering a callback for an invalid event.""" + callback = MockCallback("bad1a") + bad_mask = _IntFlag(0b11111111) + self._verify_bad_notification(bad_mask, callback) + + def _verify_bad_notification( + self, bad_mask: Union[_IntFlag, int], callback: MockCallback + ) -> None: + """Verify an invalid register does not change instance state or generate any log records.""" + effective_mask = bad_mask if isinstance(bad_mask, int) else bad_mask.value + type_error = TypeError(BAD_EVENT_MASK_MSG % f"{effective_mask:b}") + with self.assertRaises(type(type_error)) as context: + self.ntp.register_ntp_event_callback(callback.mock_callback, bad_mask) + exc_data = get_context_exception(context) + self.assertEqual(repr(exc_data), repr(type_error)) + + expected_state = DEFAULT_NTP_STATE.copy() + verify_generic_expected_state_and_log( + self, + expected_state, + (), + "NTP state fields %s do not match expected", + "Failing to registering a callback should not add any log records; got:\n %s", + ) + + +# end class TestNTPRegistrations() + + +if __name__ == "__main__": + unittest.main() + +mock_cleanup() From f6c0a9256b1aa325fd1db04d599e288fc6925d8e Mon Sep 17 00:00:00 2001 From: "H. Phil Duby" Date: Sat, 5 Oct 2024 23:16:58 -0600 Subject: [PATCH 2/7] update pre-commit hooks version --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 98790bc..b5bcce7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -12,7 +12,7 @@ repos: hooks: - id: reuse - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v5.0.0 hooks: - id: check-yaml - id: end-of-file-fixer From 37e5a5dc78263846889470dab938242365005c9e Mon Sep 17 00:00:00 2001 From: "H. 
Phil Duby" Date: Sun, 6 Oct 2024 13:18:57 -0600 Subject: [PATCH 3/7] add adafruit-circuitpython-logging to optional requirements --- optional_requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/optional_requirements.txt b/optional_requirements.txt index d4e27c4..37d978d 100644 --- a/optional_requirements.txt +++ b/optional_requirements.txt @@ -1,3 +1,4 @@ # SPDX-FileCopyrightText: 2022 Alec Delaney, for Adafruit Industries # # SPDX-License-Identifier: Unlicense +adafruit-circuitpython-logging From 5404703dbee706d9d7101ddc864c2faec625e9a3 Mon Sep 17 00:00:00 2001 From: "H. Phil Duby" Date: Sun, 6 Oct 2024 13:49:49 -0600 Subject: [PATCH 4/7] Fix non-integer constant --- adafruit_ntp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/adafruit_ntp.py b/adafruit_ntp.py index 31a0c39..cdd0559 100644 --- a/adafruit_ntp.py +++ b/adafruit_ntp.py @@ -188,7 +188,7 @@ class NTP: # pylint:disable=too-many-instance-attributes """Time to wait before retry after first (network operation) failure.""" MAX_WAIT = int(60 * NS_PER_SEC) # maximum wait time in nanoseconds """Maximum time to wait before retry after a failure.""" - WAIT_FACTOR = const(1.5) # multiplier for exponential backoff + WAIT_FACTOR = 1.5 # multiplier for exponential backoff """Amount (multiplication factor) to increase the wait before retry after each failure.""" MINIMUM_RETRY_DELAY: int = const(60) # 60 seconds From 29345963324fb0dbf5e6af598381bc76c5518301 Mon Sep 17 00:00:00 2001 From: "H. 
Phil Duby" Date: Sun, 6 Oct 2024 15:06:31 -0600 Subject: [PATCH 5/7] Adjust docstring content for reST --- adafruit_ntp.py | 80 ++++++++++++++++++++++++---------------------- tests/test_mock.py | 4 +-- 2 files changed, 44 insertions(+), 40 deletions(-) diff --git a/adafruit_ntp.py b/adafruit_ntp.py index cdd0559..6b89d78 100644 --- a/adafruit_ntp.py +++ b/adafruit_ntp.py @@ -150,12 +150,11 @@ class EventType(_IntFlag): # pylint:disable=too-few-public-methods class NTPIncompleteError(TimeoutError): """ - NTP synchronize has never completed successfully yet. + Indicates that NTP synchronization has not yet completed successfully. Raised when an NTP operation cannot complete in non-blocking mode or during retries. - - For transient errors and partial progress. Further NTP calls will retry or - continue to the next step. + This exception represents transient errors and partial progress. Further NTP calls will + retry or continue to the next step. """ @@ -166,14 +165,14 @@ class NTP: # pylint:disable=too-many-instance-attributes This class uses a simple state machine to manage synchronization: - USING_CACHED_REFERENCE (state 3): The default state where the cached time reference is used. - - Transitions to GETTING_SOCKET when the cache expires. + - Transitions to GETTING_SOCKET when the cache expires. - GETTING_SOCKET (state 1): Attempts to perform a DNS lookup for the NTP server. - - Transitions to GETTING_PACKET on success. - - Remains in this state if retries are needed. + - Transitions to GETTING_PACKET on success. + - Remains in this state if retries are needed. - GETTING_PACKET (state 2): Sends an NTP request and waits for the response. - - Transitions back to USING_CACHED_REFERENCE. - - On failure, any existing cached value will continue to be used until the next scheduled - synchronization. + - Transitions back to USING_CACHED_REFERENCE. + - On failure, any existing cached value will continue to be used until the next scheduled + synchronization. 
The state transitions are managed by the `_update_time_sync` method, which is called if the cached time is expired when `utc_ns` is accessed. @@ -213,14 +212,16 @@ def __init__( # pylint:disable=too-many-arguments :param object socketpool: A socket provider such as CPython's `socket` module. :param str server: The domain (url) of the ntp server to query. :param int port: The port of the ntp server to query. - :param float tz_offset: Timezone offset in hours from UTC. Only useful for timezone ignorant - CircuitPython. CPython will determine timezone automatically and adjust (so don't use - this.) For example, Pacific daylight savings time is -7. + :param float tz_offset: Timezone offset in hours from UTC. This applies to both timezone + ignorant CircuitPython and CPython. CPython is aware of timezones, + but this code uses methods that do not access that, to be + consistent with CircuitPython. + For example, Pacific daylight savings time is -7. :param int socket_timeout: UDP socket timeout, in seconds. :param int cache_seconds: How many seconds to use a cached result from NTP server - (default 0, which respects NTP server's minimum). + (default 0, which respects NTP server's minimum). :param bool blocking: Determines whether the NTP operations should be blocking or - non-blocking. + non-blocking. """ self._pool: SocketPool = socketpool self._server: str = server @@ -251,25 +252,25 @@ def __init__( # pylint:disable=too-many-arguments @property def datetime(self) -> time.struct_time: """ - Time (structure) based on NTP sever time reference. Time synchronization is updated + Time (structure) based on NTP server time reference. Time synchronization is updated if the cache has expired. - CircuitPython always expects to be working with UTC. CPython though will use it's own + CircuitPython always expects to be working with UTC. CPython though will use its own notion of the current time zone when using localtime. 
To get those to be consistent, localtime is overridden (during import) to use gmtime when running in CPython. That - way this should always return UTC information. + way this should always return UTC based information. - :returns time.struct_time: current UTC time in seconds. + :returns time.struct_time: Current UTC time in seconds. """ current_time_s = self.utc_ns // NS_PER_SEC # seconds since unix epoch return localtime(current_time_s) @property def utc_ns(self) -> int: - """UTC (unix epoch) time in nanoseconds based on NTP sever time reference. + """UTC (unix epoch) time in nanoseconds based on NTP server time reference. Time synchronization updated if the cache has expired. - :returns: integer number of nanoseconds since the unix epoch (1970-01-01 00:00:00). + :returns int: Integer number of nanoseconds since the unix epoch (1970-01-01 00:00:00). :raises NTPIncompleteError: if no NTP synchronization has been successful yet. """ if time.monotonic_ns() > self._next_sync: @@ -373,26 +374,29 @@ def register_ntp_event_callback( Callbacks can be used to turn off the radio to save power, initiate a network connection, or other progress monitoring processes. - EG: wifi.radio.enabled = False or connection_manager.connect() + EG: `wifi.radio.enabled = False` or `connection_manager.connect()` + + .. caution:: - NOTE: This implementation does not prevent duplicate registration of the same callback. - All attempts to consistently identify when a callback is already registered have - failed due to the limitations of the current circuitpython implementation. Comparing - the callback value directly, converting to string using str(), or repr(), or to a - number using id() all have cases where an identical callback reference will be - treated as different. + This implementation does not prevent duplicate registration of the same callback. 
+ All attempts to consistently identify when a callback is already registered have + failed due to the limitations of the current CircuitPython implementation. Comparing + the callback value directly, converting to string using `str()`, or `repr()`, or to a + number using `id()` all have cases where an identical callback reference will be + treated as different. - If the same callback is registered multiple times, with the same event type, it will - be called multiple times when that event type occurs. + If the same callback is registered multiple times, with the same event type, it will + be called multiple times when that event type occurs. :param Callable[[IntFlag, int], None] callback: The callback function to register. :param IntFlag event_types: The event types that should trigger this callback. This can - be a single event type or a combination of multiple events. - Defaults to EventType.SYNC_COMPLETE. - :raises TypeError: If the event_types argument is not a valid event type or combination of - event types. + be a single event type or a combination of multiple events. + Defaults to `EventType.SYNC_COMPLETE`. + :raises TypeError: If the `event_types` argument is not a valid event type or combination + of event types. 
+ + Usage examples:: - Usage examples: from adafruit_ntp import NTP, EventType ntp = NTP(socketpool) @@ -410,9 +414,9 @@ def on_ntp_event(event_type: IntFlag, next_time: int) -> None: elif event_type == EventType.LOOKUP_FAILED: print(f"DNS lookup failed, need to verify active network connection.") - # Register for multiple events for a single callback - ntp.register_ntp_event_callback(on_ntp_event, EventType.SYNC_COMPLETE | \\ - EventType.SYNC_FAILED | EventType.LOOKUP_FAILED) + # Register a single callback for multiple events + ntp.register_ntp_event_callback(on_ntp_event, + EventType.SYNC_COMPLETE | EventType.SYNC_FAILED | EventType.LOOKUP_FAILED) """ if not isinstance(event_types, (_IntFlag, int)): raise TypeError(f"{type(event_types)} is not compatible with event types") diff --git a/tests/test_mock.py b/tests/test_mock.py index e75145b..9df49c4 100644 --- a/tests/test_mock.py +++ b/tests/test_mock.py @@ -1400,9 +1400,9 @@ def _setup_mock_log_capture(context: unittest.TestCase) -> None: """ # pylint:disable=protected-access context._log_handler = ListHandler() - context.mogger: logging.Logger = setup_logger( + context.mogger: logging.Logger = setup_logger( # type:ignore # mocking logger MOCK_LOGGER - ) # type:ignore # mocking logger + ) context._log_handler.log_only_to_me(context.mogger) From e45d3ab6db78bcbaf556dd475e94bddf78806782 Mon Sep 17 00:00:00 2001 From: "H. 
Phil Duby" Date: Sun, 6 Oct 2024 16:14:02 -0600 Subject: [PATCH 6/7] cleanup docstrings for Sphinx and reST add new example to documentation remove debug code from example --- adafruit_ntp.py | 32 +++++++++++++++++++------------- docs/examples.rst | 11 ++++++++++- examples/ntp_powersave.py | 10 ---------- 3 files changed, 29 insertions(+), 24 deletions(-) diff --git a/adafruit_ntp.py b/adafruit_ntp.py index 6b89d78..8d27c48 100644 --- a/adafruit_ntp.py +++ b/adafruit_ntp.py @@ -165,17 +165,23 @@ class NTP: # pylint:disable=too-many-instance-attributes This class uses a simple state machine to manage synchronization: - USING_CACHED_REFERENCE (state 3): The default state where the cached time reference is used. - - Transitions to GETTING_SOCKET when the cache expires. + + - Transitions to GETTING_SOCKET when the cache expires. + - GETTING_SOCKET (state 1): Attempts to perform a DNS lookup for the NTP server. - - Transitions to GETTING_PACKET on success. - - Remains in this state if retries are needed. + + - Transitions to GETTING_PACKET on success. + - Remains in this state if retries are needed. + - GETTING_PACKET (state 2): Sends an NTP request and waits for the response. - - Transitions back to USING_CACHED_REFERENCE. - - On failure, any existing cached value will continue to be used until the next scheduled - synchronization. - The state transitions are managed by the `_update_time_sync` method, which is called if - the cached time is expired when `utc_ns` is accessed. + - Transitions back to USING_CACHED_REFERENCE. + + - On failure, any existing cached value will continue to be used until the next scheduled + synchronization. + + The state transitions are managed by the ``_update_time_sync`` method, which is called if + the cached time is expired when ``utc_ns`` is accessed. 
""" # State machine values @@ -374,15 +380,15 @@ def register_ntp_event_callback( Callbacks can be used to turn off the radio to save power, initiate a network connection, or other progress monitoring processes. - EG: `wifi.radio.enabled = False` or `connection_manager.connect()` + EG: ``wifi.radio.enabled = False`` or ``connection_manager.connect()`` .. caution:: This implementation does not prevent duplicate registration of the same callback. All attempts to consistently identify when a callback is already registered have failed due to the limitations of the current CircuitPython implementation. Comparing - the callback value directly, converting to string using `str()`, or `repr()`, or to a - number using `id()` all have cases where an identical callback reference will be + the callback value directly, converting to string using ``str()``, or ``repr()``, or to a + number using ``id()`` all have cases where an identical callback reference will be treated as different. If the same callback is registered multiple times, with the same event type, it will @@ -391,8 +397,8 @@ def register_ntp_event_callback( :param Callable[[IntFlag, int], None] callback: The callback function to register. :param IntFlag event_types: The event types that should trigger this callback. This can be a single event type or a combination of multiple events. - Defaults to `EventType.SYNC_COMPLETE`. - :raises TypeError: If the `event_types` argument is not a valid event type or combination + Defaults to ``EventType.SYNC_COMPLETE``. + :raises TypeError: If the ``event_types`` argument is not a valid event type or combination of event types. Usage examples:: diff --git a/docs/examples.rst b/docs/examples.rst index 053a48a..953d26e 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -8,7 +8,7 @@ Ensure your device works with this simple test that prints out the time from NTP :linenos: Set RTC ------------- +------- Sync your CircuitPython board's realtime clock (RTC) with time from NTP. 
@@ -16,6 +16,15 @@ Sync your CircuitPython board's realtime clock (RTC) with time from NTP. :caption: examples/ntp_set_rtc.py :linenos: +Using Notifications +------------------- + +Using notifications to monitor state and control power usage. + +.. literalinclude:: ../examples/ntp_powersave.py + :caption: examples/ntp_powersave.py + :linenos: + Simple test with CPython ------------------------ diff --git a/examples/ntp_powersave.py b/examples/ntp_powersave.py index fe52c6b..abe4863 100644 --- a/examples/ntp_powersave.py +++ b/examples/ntp_powersave.py @@ -22,12 +22,6 @@ def on_ntp_event(event_type: EventType, next_time: int): """Handle notifications from NTP about not using the radio for awhile.""" global check_connection # pylint:disable=global-statement - if event_type == EventType.NO_EVENT: - print( - "No Event: " - f"{time.monotonic_ns() =}, {wifi.radio.enabled =}, {wifi.radio.connected =}" - ) - return print(f"event {event_type}: Next operation scheduled at {next_time} ns") if event_type == EventType.LOOKUP_FAILED: check_connection = True @@ -82,9 +76,5 @@ def fmt_iso(datetime: time.struct_time) -> str: print(fmt_iso(ntp.datetime)) except NTPIncompleteError: print("Waiting for NTP information") - except Exception as ex: - print(f"{type(ex)}") - print(f"Exception: {ex}") - raise # other regular processing … time.sleep(1) From ce741e18f12233398ab793f5cba256ed46a48afe Mon Sep 17 00:00:00 2001 From: "H. 
Phil Duby" Date: Mon, 7 Oct 2024 19:53:19 -0600 Subject: [PATCH 7/7] Replace _IntFlag class with simple int constants --- .pylintrc | 4 +- adafruit_ntp.py | 101 +++++------------------------ examples/ntp_connection_manager.py | 4 +- examples/ntp_powersave.py | 4 +- examples/ntp_set_rtc.py | 6 +- examples/ntp_simpletest.py | 4 +- tests/mocks/mock_pool.py | 5 +- tests/ntp_testing_support.py | 19 ++---- tests/test_eventtype.py | 54 +++++---------- tests/test_ntp_registration.py | 25 +++---- 10 files changed, 59 insertions(+), 167 deletions(-) diff --git a/.pylintrc b/.pylintrc index 9425693..775e31f 100644 --- a/.pylintrc +++ b/.pylintrc @@ -19,7 +19,7 @@ ignore-patterns= # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). -init-hook='import sys;sys.path.insert(0,"./lib")' +init-hook= # Use multiple processes to speed up Pylint. jobs=1 @@ -159,7 +159,7 @@ ignored-classes=optparse.Values,thread._local,_thread._local # (useful for modules/projects where namespaces are manipulated during runtime # and thus existing member attributes cannot be deduced by static analysis. It # supports qualified module names, as well as Unix pattern matching. -ignored-modules=board,socketpool +ignored-modules= # Show a hint with possible names when a member name was not found. The aspect # of finding the hint is based on edit distance. diff --git a/adafruit_ntp.py b/adafruit_ntp.py index 8d27c48..0374aa2 100644 --- a/adafruit_ntp.py +++ b/adafruit_ntp.py @@ -37,7 +37,7 @@ except ImportError: pass try: - from typing import Callable, Optional, Union, Tuple, Dict + from typing import Callable, Optional, Tuple, Dict except ImportError: pass try: @@ -73,79 +73,14 @@ def recv_into( PACKET_SIZE: int = 48 -# No enum module in CircuitPython, so implement simple version. -class _IntFlag: # pylint:disable=too-few-public-methods - """Simple private implementation of an IntFlag enum. 
- - This contains hard-coded limits specific to the EventType class.""" - - def __init__(self, value=0): - if isinstance(value, _IntFlag): - self.value = value.value - else: - self.value = value - - def __and__(self, other): - if isinstance(other, _IntFlag): - return _IntFlag(self.value & other.value) - return self.value & other - - def __or__(self, other): - if isinstance(other, _IntFlag): - return _IntFlag(self.value | other.value) - return self.value | other - - def __xor__(self, other): - if isinstance(other, _IntFlag): - return _IntFlag(self.value ^ other.value) - return self.value ^ other - - def __add__(self, other): - return self.__or__(other) - - def __gt__(self, other): - if isinstance(other, _IntFlag): - return self.value > other.value - return self.value > other - - def __lt__(self, other): - if isinstance(other, _IntFlag): - return self.value < other.value - return self.value < other - - def __invert__(self): - # This relies on ALL_EVENTS existing, and being set correctly. - all_flags = EventType.ALL_EVENTS.value - return _IntFlag(~self.value & all_flags) - - def __repr__(self): - return f"IntFlag({self.value})" - - def __eq__(self, other): - if isinstance(other, _IntFlag): - return self.value == other.value - return self.value == other - - def __bool__(self): - return bool(self.value) - - @classmethod - def from_value(cls, value): - """enum emulation""" - return cls(value) - - -class EventType(_IntFlag): # pylint:disable=too-few-public-methods +class EventType: # pylint:disable=too-few-public-methods """NTP callback notification Event Types.""" - NO_EVENT = _IntFlag(0b000) - SYNC_COMPLETE = _IntFlag(0b001) - SYNC_FAILED = _IntFlag(0b010) # get packet failed - LOOKUP_FAILED = _IntFlag(0b100) # getaddrinfo failed (not timedout?) - # The custom _IntFlat class used uses the ALL_EVENTS attribute here to get a bit mask to use - # when inverting flag values. Make sure to update this if changing how many event types are - # being used. 
- ALL_EVENTS = _IntFlag(0b111) + NO_EVENT = const(0b000) + SYNC_COMPLETE = const(0b001) + SYNC_FAILED = const(0b010) # get packet failed + LOOKUP_FAILED = const(0b100) # getaddrinfo failed (not timedout?) + ALL_EVENTS = const(0b111) class NTPIncompleteError(TimeoutError): @@ -247,7 +182,7 @@ def __init__( # pylint:disable=too-many-arguments self._next_sync: int = 0 self._state: int = self.USING_CACHED_REFERENCE self._last_sync_time: int = 0 # Track the last successful sync time - self._callbacks: Dict[Callable[[_IntFlag, int], None], _IntFlag] = {} + self._callbacks: Dict[Callable[[int, int], None], int] = {} # The variables _next_rate_limit_end and _next_rate_limit_delay are intentionally # not initialized here because they are only used after calling # _initialize_backoff_timing(). Accessing them before initialization will raise an @@ -317,7 +252,6 @@ def _update_time_sync(self) -> None: - GETTING_SOCKET: Perform DNS lookup for the NTP server. - GETTING_PACKET: Send NTP request and await response. """ - self._notify_ntp_event_callbacks(EventType.NO_EVENT, -1) if self._state == self.USING_CACHED_REFERENCE: # Cached offset value expired, reinitialize backoff timing and proceed to DNS lookup. self._initialize_backoff_timing() @@ -372,8 +306,8 @@ def _server_dns_lookup(self) -> None: def register_ntp_event_callback( self, - callback: Callable[[_IntFlag, int], None], - event_types: Union[_IntFlag, int] = EventType.SYNC_COMPLETE, + callback: Callable[[int, int], None], + event_types: int = EventType.SYNC_COMPLETE, ) -> None: """ Register a callback to be notified for specific NTP events. 
@@ -424,23 +358,20 @@ def on_ntp_event(event_type: IntFlag, next_time: int) -> None: ntp.register_ntp_event_callback(on_ntp_event, EventType.SYNC_COMPLETE | EventType.SYNC_FAILED | EventType.LOOKUP_FAILED) """ - if not isinstance(event_types, (_IntFlag, int)): + if not isinstance(event_types, int): raise TypeError(f"{type(event_types)} is not compatible with event types") - if not ( - (EventType.ALL_EVENTS | event_types) == EventType.ALL_EVENTS - or event_types == EventType.NO_EVENT + if ( + EventType.ALL_EVENTS | event_types != EventType.ALL_EVENTS + or event_types == 0 ): - events_value = ( - event_types if isinstance(event_types, int) else event_types.value - ) raise TypeError( - f"Invalid event type mask 0b{events_value:b}. " + f"Invalid event type mask 0b{event_types:b}. " "Only known events can receive notifications." ) self._callbacks[callback] = event_types def _notify_ntp_event_callbacks( - self, event_type: _IntFlag, next_operation_time: int + self, event_type: int, next_operation_time: int ) -> None: """ Call all registered callbacks that are interested in the given event type. diff --git a/examples/ntp_connection_manager.py b/examples/ntp_connection_manager.py index db3e4e9..b59c901 100644 --- a/examples/ntp_connection_manager.py +++ b/examples/ntp_connection_manager.py @@ -10,7 +10,7 @@ # determine which radio is available try: - import wifi + import wifi # type:ignore import os # adjust method to get credentials as necessary... @@ -22,7 +22,7 @@ except ImportError: import board from digitalio import DigitalInOut - from adafruit_wiznet5k.adafruit_wiznet5k import WIZNET5K + from adafruit_wiznet5k.adafruit_wiznet5k import WIZNET5K # type:ignore # adjust with busio.SPI() as necessary... 
spi = board.SPI() diff --git a/examples/ntp_powersave.py b/examples/ntp_powersave.py index abe4863..30000b3 100644 --- a/examples/ntp_powersave.py +++ b/examples/ntp_powersave.py @@ -6,8 +6,8 @@ import os import time -import socketpool -import wifi +import socketpool # type:ignore +import wifi # type:ignore from adafruit_ntp import NTP, EventType, NTPIncompleteError diff --git a/examples/ntp_set_rtc.py b/examples/ntp_set_rtc.py index e0be6f9..5725501 100644 --- a/examples/ntp_set_rtc.py +++ b/examples/ntp_set_rtc.py @@ -6,9 +6,9 @@ import os import time -import rtc -import socketpool -import wifi +import rtc # type:ignore +import socketpool # type:ignore +import wifi # type:ignore import adafruit_ntp diff --git a/examples/ntp_simpletest.py b/examples/ntp_simpletest.py index 59b88bb..538b706 100644 --- a/examples/ntp_simpletest.py +++ b/examples/ntp_simpletest.py @@ -6,8 +6,8 @@ import os import time -import socketpool -import wifi +import socketpool # type:ignore +import wifi # type:ignore import adafruit_ntp diff --git a/tests/mocks/mock_pool.py b/tests/mocks/mock_pool.py index 1d558fb..7a431a2 100644 --- a/tests/mocks/mock_pool.py +++ b/tests/mocks/mock_pool.py @@ -228,7 +228,7 @@ def socket( return MockSocket(family, type, proto) def set_getaddrinfo_responses( - self, responses: List[Union[Exception, GetAddressInfoT]] + self, responses: List[Union[Exception, GetAddressInfoT]] # type:ignore ) -> None: # type:ignore """Set a sequence of responses for the getaddrinfo method.""" self.mock_getaddrinfo_attempts = responses @@ -256,8 +256,7 @@ def __init__(self, context: str): def mock_callback(self, event: int, delay: int) -> None: """Log which callback was called, and the triggering event details""" - event_num = event if isinstance(event, int) else event.value - if event_num == 0: + if event == 0: _logger.debug(MOCKED_CALLBACK_MSG % (self.context, event, delay)) return _logger.info(MOCKED_CALLBACK_MSG % (self.context, event, delay)) diff --git 
a/tests/ntp_testing_support.py b/tests/ntp_testing_support.py index fe7eb36..fe29085 100644 --- a/tests/ntp_testing_support.py +++ b/tests/ntp_testing_support.py @@ -66,7 +66,6 @@ from adafruit_ntp import ( # pylint:disable=wrong-import-position NTP, EventType, - _IntFlag, NTPIncompleteError, ) # pylint:disable=wrong-import-position @@ -120,7 +119,7 @@ def __init__( monotonic_start_ns: int = 0, next_sync: int = 0, last_sync_time: int = 0, - callbacks: Dict[Callable[[_IntFlag, int], None], _IntFlag] = None, + callbacks: Dict[Callable[[int, int], None], int] = None, state: int = 0, blocking: bool = True, monotonic_ns: int = 0, # mocked functionality internal state information @@ -138,7 +137,7 @@ def __init__( self.monotonic_start_ns: int = monotonic_start_ns self.next_sync: int = next_sync self.last_sync_time: int = last_sync_time - self.callbacks: Dict[Callable[[_IntFlag, int], None], _IntFlag] = ( + self.callbacks: Dict[Callable[[int, int], None], int] = ( {} if callbacks is None else dict(callbacks) ) self.state: int = state @@ -865,19 +864,11 @@ def _changed_state_fields( # in memory. # All attempts to find a data structure and comparison code to match expected and actual # state dictionary callable instance have failed. An expected entry created from exactly - # the same method as was registered still compares as not equal. The best can apparently - # be done, is to make sure that the registered event masks are the same. + # the same method as was registered still compares as not equal. The best that can + # apparently be done, is to make sure that the registered event masks are the same. callback1 = getattr(state1, field) callback2 = getattr(state2, field) - # Forcing all values to IntFlag instances seems easier than extracting the .value when the - # dict value is an IntFlag. Need to get them to be consistent for the sorting (on - # circuitpython) to work. Otherwise can get. 
- # TypeError: unsupported types for __lt__: 'int', 'IntFlag' - # Either cpython automatically reverse the comparison, or it happens to only compare IntFlag - # to int, and not int to IntFlag. - if sorted([_IntFlag(v) for v in callback1.values()]) != sorted( - [_IntFlag(v) for v in callback2.values()] - ): + if sorted(callback1.values()) != sorted(callback2.values()): changed_fields.add(field) return changed_fields diff --git a/tests/test_eventtype.py b/tests/test_eventtype.py index d0d5bbc..7a17cc3 100644 --- a/tests/test_eventtype.py +++ b/tests/test_eventtype.py @@ -9,7 +9,7 @@ import unittest from tests.shared_for_testing import mock_cleanup -from adafruit_ntp import EventType, _IntFlag +from adafruit_ntp import EventType class TestEventType(unittest.TestCase): @@ -17,46 +17,41 @@ class TestEventType(unittest.TestCase): def test_basic_event_types(self): """Test basic event types and their values.""" - self.assertEqual(EventType.NO_EVENT.value, _IntFlag(0b000)) # 0 - self.assertEqual(EventType.SYNC_COMPLETE.value, _IntFlag(0b001)) # 1 - self.assertEqual(EventType.SYNC_FAILED.value, _IntFlag(0b010)) # 2 - self.assertEqual(EventType.LOOKUP_FAILED.value, _IntFlag(0b100)) # 4 - self.assertEqual(EventType.ALL_EVENTS.value, _IntFlag(0b111)) # 7 + self.assertEqual(EventType.NO_EVENT, 0b000) # 0 + self.assertEqual(EventType.SYNC_COMPLETE, 0b001) # 1 + self.assertEqual(EventType.SYNC_FAILED, 0b010) # 2 + self.assertEqual(EventType.LOOKUP_FAILED, 0b100) # 4 + self.assertEqual(EventType.ALL_EVENTS, 0b111) # 7 def test_event_combination(self): """Test bitwise OR combination of event types.""" combined_event = EventType.SYNC_COMPLETE | EventType.LOOKUP_FAILED - self.assertEqual(combined_event, _IntFlag(0b101)) # 1 | 4 = 5 - self.assertEqual(combined_event.value, 0b101) # 1 | 4 = 5 + self.assertEqual(combined_event, 0b101) # 1 | 4 = 5 combined_event = EventType.SYNC_COMPLETE | EventType.SYNC_FAILED - self.assertEqual(combined_event, _IntFlag(0b011)) # 1 | 2 = 3 - 
self.assertEqual(combined_event.value, 0b011) # 1 | 2 = 3 + self.assertEqual(combined_event, 0b011) # 1 | 2 = 3 combined_event = EventType.SYNC_FAILED | EventType.LOOKUP_FAILED - self.assertEqual(combined_event, _IntFlag(0b110)) # 2 | 4 = 6 - self.assertEqual(combined_event.value, 0b110) # 2 | 4 = 6 + self.assertEqual(combined_event, 0b110) # 2 | 4 = 6 def test_event_intersection(self): """Test bitwise AND intersection of event types.""" combined_event = EventType.SYNC_COMPLETE | EventType.LOOKUP_FAILED intersect_event = combined_event & EventType.SYNC_COMPLETE - self.assertEqual(intersect_event.value, 0b001) # 5 & 1 = 1 + self.assertEqual(intersect_event, 0b001) # 5 & 1 = 1 intersect_event = combined_event & EventType.SYNC_FAILED - self.assertEqual(intersect_event.value, 0b000) # 5 & 2 = 0 + self.assertEqual(intersect_event, 0b000) # 5 & 2 = 0 def test_event_complement(self): """Test bitwise NOT complement of event types.""" - complement_event = ~EventType.SYNC_COMPLETE + complement_event = (~EventType.SYNC_COMPLETE) & 0b111 # Expected to be the bitwise complement of SYNC_COMPLETE within the range of all flags - expected_value = 2 | EventType.SYNC_FAILED.value | EventType.LOOKUP_FAILED.value - self.assertEqual(complement_event.value, expected_value) - self.assertNotEqual(complement_event.value, EventType.SYNC_COMPLETE.value) + expected_value = 2 | EventType.SYNC_FAILED | EventType.LOOKUP_FAILED + self.assertEqual(complement_event, expected_value) + self.assertNotEqual(complement_event, EventType.SYNC_COMPLETE) self.assertEqual(complement_event & EventType.SYNC_COMPLETE, 0b000) self.assertEqual(complement_event & EventType.SYNC_COMPLETE, EventType.NO_EVENT) - self.assertEqual(~EventType.NO_EVENT, 0b111) - self.assertEqual(~EventType.NO_EVENT, EventType.ALL_EVENTS) def test_event_equality(self): """Test equality between event types.""" @@ -64,28 +59,13 @@ def test_event_equality(self): event2 = EventType.SYNC_COMPLETE self.assertEqual(event1, event2) - 
combined_event = EventType.SYNC_COMPLETE | EventType.LOOKUP_FAILED - self.assertNotEqual(event1, combined_event) - - self.assertEqual(EventType.SYNC_COMPLETE, EventType.SYNC_COMPLETE.value) - self.assertEqual(EventType.SYNC_COMPLETE, _IntFlag(0b001)) - self.assertEqual(EventType.SYNC_COMPLETE, 0b001) - self.assertNotEqual(EventType.SYNC_COMPLETE, EventType.SYNC_FAILED) - self.assertNotEqual(EventType.SYNC_COMPLETE, EventType.NO_EVENT) - self.assertNotEqual(EventType.SYNC_COMPLETE, EventType.LOOKUP_FAILED) - self.assertNotEqual(EventType.SYNC_COMPLETE, EventType.ALL_EVENTS) - self.assertNotEqual(EventType.SYNC_COMPLETE, _IntFlag(0b000)) - self.assertNotEqual(EventType.SYNC_COMPLETE, 0b000) - self.assertNotEqual(EventType.SYNC_COMPLETE, _IntFlag(0b010)) - self.assertNotEqual(EventType.SYNC_COMPLETE, 0b010) - def test_event_xor(self): """Test bitwise XOR operation of event types.""" xor_event = EventType.SYNC_COMPLETE ^ EventType.SYNC_FAILED - self.assertEqual(xor_event.value, 0b011) # 1 ^ 2 = 3 + self.assertEqual(xor_event, 0b011) # 1 ^ 2 = 3 xor_event = EventType.SYNC_COMPLETE ^ EventType.SYNC_COMPLETE - self.assertEqual(xor_event.value, 0b000) # 1 ^ 1 = 0 + self.assertEqual(xor_event, 0b000) # 1 ^ 1 = 0 def test_event_to_bool(self): """Test conversion of event types to bool.""" diff --git a/tests/test_ntp_registration.py b/tests/test_ntp_registration.py index ac5d3c1..2960589 100644 --- a/tests/test_ntp_registration.py +++ b/tests/test_ntp_registration.py @@ -8,10 +8,6 @@ import unittest -try: - from typing import Union -except ImportError: - pass import adafruit_logging as logging from tests.shared_for_testing import ( ListHandler, @@ -32,7 +28,7 @@ verify_generic_expected_state_and_log, DEFAULT_NTP_STATE, ) -from adafruit_ntp import EventType, _IntFlag +from adafruit_ntp import EventType class TestNTPRegistrations(unittest.TestCase): @@ -112,9 +108,7 @@ def test_register_good_notification(self): # pylint:disable=invalid-name callback1.mock_callback, 
EventType.SYNC_COMPLETE ) expected_state = DEFAULT_NTP_STATE.copy() - expected_state.callbacks[callback1.mock_callback] = ( - EventType.SYNC_COMPLETE.value - ) + expected_state.callbacks[callback1.mock_callback] = EventType.SYNC_COMPLETE verify_generic_expected_state_and_log( self, expected_state, @@ -227,23 +221,20 @@ def test_register_good_notification(self): # pylint:disable=invalid-name ) def test_register_bad_int_notification(self): # pylint:disable=invalid-name - """Test registering a callback for an invalid event.""" + """Test registering a callback for an invalid (too big) event.""" callback = MockCallback("bad1") bad_mask = 0b11111111 self._verify_bad_notification(bad_mask, callback) - def test_register_bad_intflag_notification2(self): # pylint:disable=invalid-name - """Test registering a callback for an invalid event.""" + def test_register_bad_int_zero_notification(self): # pylint:disable=invalid-name + """Test registering a callback for an invalid zero event.""" callback = MockCallback("bad1a") - bad_mask = _IntFlag(0b11111111) + bad_mask = 0 self._verify_bad_notification(bad_mask, callback) - def _verify_bad_notification( - self, bad_mask: Union[_IntFlag, int], callback: MockCallback - ) -> None: + def _verify_bad_notification(self, bad_mask: int, callback: MockCallback) -> None: """Verify an invalid register does not change instance state or generate any log records.""" - effective_mask = bad_mask if isinstance(bad_mask, int) else bad_mask.value - type_error = TypeError(BAD_EVENT_MASK_MSG % f"{effective_mask:b}") + type_error = TypeError(BAD_EVENT_MASK_MSG % f"{bad_mask:b}") with self.assertRaises(type(type_error)) as context: self.ntp.register_ntp_event_callback(callback.mock_callback, bad_mask) exc_data = get_context_exception(context)