From 3fd618b503620074a60ddfd9ca09f7c37742c009 Mon Sep 17 00:00:00 2001 From: carlad Date: Tue, 11 Jan 2022 16:27:31 +0100 Subject: [PATCH 1/9] align dependencies --- poetry.lock | 727 ++++++++++++++++++++++++++----------------------- pyproject.toml | 14 +- 2 files changed, 386 insertions(+), 355 deletions(-) diff --git a/poetry.lock b/poetry.lock index 7cd403270156..37cd20a4458b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -11,11 +11,11 @@ six = "*" [[package]] name = "aio-pika" -version = "6.8.0" +version = "6.8.1" description = "Wrapper for the aiormq for asyncio and humans." category = "main" optional = false -python-versions = ">3.5.*, <4" +python-versions = ">=3.5, <4" [package.dependencies] aiormq = ">=3.2.3,<4" @@ -53,7 +53,7 @@ speedups = ["aiodns", "brotlipy", "cchardet"] [[package]] name = "aioresponses" -version = "0.7.2" +version = "0.7.3" description = "Mock out requests made by ClientSession from aiohttp package" category = "dev" optional = false @@ -79,7 +79,7 @@ develop = ["aiomisc (>=11.0,<12.0)", "async-generator", "coverage (!=4.3)", "cov [[package]] name = "anyio" -version = "3.4.0" +version = "3.5.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" category = "dev" optional = false @@ -91,7 +91,7 @@ sniffio = ">=1.1" typing-extensions = {version = "*", markers = "python_version < \"3.8\""} [package.extras] -doc = ["sphinx-rtd-theme", "sphinx-autodoc-typehints (>=1.2.0)"] +doc = ["packaging", "sphinx-rtd-theme", "sphinx-autodoc-typehints (>=1.2.0)"] test = ["coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "pytest (>=6.0)", "pytest-mock (>=3.6.1)", "trustme", "contextlib2", "uvloop (<0.15)", "mock (>=4)", "uvloop (>=0.15)"] trio = ["trio (>=0.16)"] @@ -180,7 +180,7 @@ tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (> [[package]] name = "azure-core" -version = "1.21.0" +version = "1.21.1" description = "Microsoft Azure Core Library for Python" category = "dev" optional = false @@ -258,14 +258,14 @@ numpy = ">=1.15.0" [[package]] name = "boto3" -version = "1.20.20" +version = "1.20.32" description = "The AWS SDK for Python" category = "main" optional = false python-versions = ">= 3.6" [package.dependencies] -botocore = ">=1.23.20,<1.24.0" +botocore = ">=1.23.32,<1.24.0" jmespath = ">=0.7.1,<1.0.0" s3transfer = ">=0.5.0,<0.6.0" @@ -274,7 +274,7 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.23.20" +version = "1.23.32" description = "Low-level, data-driven core of boto 3." category = "main" optional = false @@ -404,11 +404,11 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "colorclass" -version = "2.2.0" +version = "2.2.2" description = "Colorful worry-free console applications for Linux, Mac OS X, and Windows." category = "main" optional = false -python-versions = "*" +python-versions = ">=2.6" [[package]] name = "coloredlogs" @@ -528,7 +528,7 @@ python-versions = "*" [[package]] name = "decorator" -version = "5.1.0" +version = "5.1.1" description = "Decorators for Humans" category = "main" optional = false @@ -645,11 +645,11 @@ requests = ">=2.0" [[package]] name = "filelock" -version = "3.4.0" +version = "3.4.2" description = "A platform independent file lock." 
category = "main" optional = true -python-versions = ">=3.6" +python-versions = ">=3.7" [package.extras] docs = ["furo (>=2021.8.17b43)", "sphinx (>=4.1)", "sphinx-autodoc-typehints (>=1.12)"] @@ -789,7 +789,7 @@ test = ["betamax (>=0.8.0)", "pytest (>2.3.5)", "betamax-matchers (>=0.1.0)", "u [[package]] name = "gitpython" -version = "3.1.24" +version = "3.1.26" description = "GitPython is a python library used to interact with Git repositories" category = "dev" optional = false @@ -797,11 +797,11 @@ python-versions = ">=3.7" [package.dependencies] gitdb = ">=4.0.1,<5" -typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.10\""} +typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""} [[package]] name = "google-api-core" -version = "2.2.2" +version = "2.3.2" description = "Google API client core library" category = "dev" optional = false @@ -869,7 +869,7 @@ grpc = ["grpcio (>=1.8.2,<2.0dev)"] [[package]] name = "google-cloud-storage" -version = "1.43.0" +version = "1.44.0" description = "Google Cloud Storage API client library" category = "dev" optional = false @@ -923,7 +923,7 @@ requests = ["requests (>=2.18.0,<3.0.0dev)"] [[package]] name = "googleapis-common-protos" -version = "1.53.0" +version = "1.54.0" description = "Common protobufs used in Google APIs" category = "dev" optional = false @@ -948,7 +948,7 @@ docs = ["sphinx"] [[package]] name = "grpcio" -version = "1.42.0" +version = "1.43.0" description = "HTTP/2-based RPC framework" category = "main" optional = false @@ -958,7 +958,7 @@ python-versions = ">=3.6" six = ">=1.5.2" [package.extras] -protobuf = ["grpcio-tools (>=1.42.0)"] +protobuf = ["grpcio-tools (>=1.43.0)"] [[package]] name = "h11" @@ -1050,11 +1050,11 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "importlib-metadata" -version = "4.8.2" +version = "4.10.0" description = "Read metadata from Python packages" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} @@ -1063,7 +1063,7 @@ zipp = ">=0.5" [package.extras] docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] perf = ["ipython"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] [[package]] name = "incremental" @@ -1094,7 +1094,7 @@ python-versions = "*" [[package]] name = "isodate" -version = "0.6.0" +version = "0.6.1" description = "An ISO 8601 date/time/duration parser and formatter" category = "dev" optional = false @@ -1332,7 +1332,7 @@ six = "*" [[package]] name = "moto" -version = "2.2.17" +version = "2.2.20" description = "A library that allows your python tests to easily mock out the boto library" category = "dev" optional = false @@ -1693,11 +1693,11 @@ packaging = ">=20.3,<21.0" [[package]] name = "pillow" -version = "8.4.0" +version = "9.0.0" description = "Python Imaging Library (Fork)" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [[package]] name = "pluggy" @@ 
-1740,7 +1740,7 @@ wcwidth = "*" [[package]] name = "protobuf" -version = "3.19.1" +version = "3.19.3" description = "Protocol Buffers" category = "main" optional = false @@ -1748,7 +1748,7 @@ python-versions = ">=3.5" [[package]] name = "psutil" -version = "5.8.0" +version = "5.9.0" description = "Cross-platform lib for process and system monitoring in Python." category = "dev" optional = false @@ -1759,7 +1759,7 @@ test = ["ipaddress", "mock", "unittest2", "enum34", "pywin32", "wmi"] [[package]] name = "psycopg2-binary" -version = "2.9.2" +version = "2.9.3" description = "psycopg2 - Python-PostgreSQL Database Adapter" category = "main" optional = false @@ -2035,11 +2035,11 @@ testing = ["fields", "hunter", "process-tests", "six", "pytest-xdist", "virtuale [[package]] name = "pytest-forked" -version = "1.3.0" +version = "1.4.0" description = "run tests in isolated forked subprocesses" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.6" [package.dependencies] py = "*" @@ -2079,7 +2079,7 @@ pytest = ">=3.6.0" [[package]] name = "pytest-xdist" -version = "2.4.0" +version = "2.5.0" description = "pytest xdist plugin for distributed testing and loop-on-failing modes" category = "dev" optional = false @@ -2087,7 +2087,7 @@ python-versions = ">=3.6" [package.dependencies] execnet = ">=1.1" -pytest = ">=6.0.0" +pytest = ">=6.2.0" pytest-forked = "*" [package.extras] @@ -2116,7 +2116,7 @@ six = ">=1.5" [[package]] name = "python-engineio" -version = "4.3.0" +version = "4.3.1" description = "Engine.IO server and client for Python" category = "main" optional = false @@ -2128,7 +2128,7 @@ client = ["requests (>=2.21.0)", "websocket-client (>=0.54.0)"] [[package]] name = "python-socketio" -version = "5.5.0" +version = "5.5.1" description = "Socket.IO server and client for Python" category = "main" optional = false @@ -2354,7 +2354,7 @@ crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] [[package]] name = "sacremoses" -version = "0.0.46" +version = "0.0.47" description = "SacreMoses" category = "main" optional = true @@ -2369,7 +2369,7 @@ tqdm = "*" [[package]] name = "sanic" -version = "21.9.3" +version = "21.12.1" description = "A web server and web framework that's written to go fast. Build fast. Run fast." 
category = "main" optional = false @@ -2385,10 +2385,11 @@ uvloop = {version = ">=0.5.3", markers = "sys_platform != \"win32\" and implemen websockets = ">=10.0" [package.extras] -all = ["isort (>=5.0.0)", "beautifulsoup4", "chardet (>=3.0.0,<4.0.0)", "pytest-benchmark", "pytest-cov", "pytest-sugar", "m2r2", "pygments", "mypy (>=0.901)", "towncrier", "pytest (==5.2.1)", "uvicorn (<0.15.0)", "coverage (==5.3)", "pytest-sanic", "flake8", "sphinx (>=2.1.2)", "sanic-testing (>=0.7.0)", "bandit", "black", "gunicorn (==20.0.4)", "tox", "sphinx-rtd-theme (>=0.4.3)", "docutils", "types-ujson"] -dev = ["sanic-testing (>=0.7.0)", "pytest (==5.2.1)", "coverage (==5.3)", "gunicorn (==20.0.4)", "pytest-cov", "beautifulsoup4", "pytest-sanic", "pytest-sugar", "pytest-benchmark", "chardet (>=3.0.0,<4.0.0)", "flake8", "black", "isort (>=5.0.0)", "bandit", "mypy (>=0.901)", "docutils", "pygments", "uvicorn (<0.15.0)", "tox", "towncrier", "types-ujson"] -docs = ["sphinx (>=2.1.2)", "sphinx-rtd-theme (>=0.4.3)", "docutils", "pygments", "m2r2"] -test = ["sanic-testing (>=0.7.0)", "pytest (==5.2.1)", "coverage (==5.3)", "gunicorn (==20.0.4)", "pytest-cov", "beautifulsoup4", "pytest-sanic", "pytest-sugar", "pytest-benchmark", "chardet (>=3.0.0,<4.0.0)", "flake8", "black", "isort (>=5.0.0)", "bandit", "mypy (>=0.901)", "docutils", "pygments", "uvicorn (<0.15.0)", "types-ujson"] +all = ["mistune (<2.0.0)", "flake8", "isort (>=5.0.0)", "sanic-testing (>=0.7.0)", "towncrier", "bandit", "m2r2", "mypy (>=0.901,<0.910)", "pytest-cov", "sphinx (>=2.1.2)", "coverage (==5.3)", "pytest-sugar", "cryptography", "tox", "beautifulsoup4", "black", "pytest-benchmark", "docutils", "gunicorn (==20.0.4)", "pytest-sanic", "uvicorn (<0.15.0)", "pygments", "chardet (>=3.0.0,<4.0.0)", "sphinx-rtd-theme (>=0.4.3)", "pytest (==6.2.5)", "types-ujson"] +dev = ["sanic-testing (>=0.7.0)", "pytest (==6.2.5)", "coverage (==5.3)", "gunicorn (==20.0.4)", "pytest-cov", "beautifulsoup4", "pytest-sanic", "pytest-sugar", "pytest-benchmark", "chardet (>=3.0.0,<4.0.0)", "flake8", "black", "isort (>=5.0.0)", "bandit", "mypy (>=0.901,<0.910)", "docutils", "pygments", "uvicorn (<0.15.0)", "cryptography", "tox", "towncrier", "types-ujson"] +docs = ["sphinx (>=2.1.2)", "sphinx-rtd-theme (>=0.4.3)", "docutils", "pygments", "m2r2", "mistune (<2.0.0)"] +ext = ["sanic-ext"] +test = ["sanic-testing (>=0.7.0)", "pytest (==6.2.5)", "coverage (==5.3)", "gunicorn (==20.0.4)", "pytest-cov", "beautifulsoup4", "pytest-sanic", "pytest-sugar", "pytest-benchmark", "chardet (>=3.0.0,<4.0.0)", "flake8", "black", "isort (>=5.0.0)", "bandit", "mypy (>=0.901,<0.910)", "docutils", "pygments", "uvicorn (<0.15.0)", "types-ujson"] [[package]] name = "sanic-cors" @@ -2611,7 +2612,7 @@ python-versions = "*" [[package]] name = "spacy" -version = "3.2.0" +version = "3.2.1" description = "Industrial-strength Natural Language Processing (NLP) in Python" category = "main" optional = true @@ -2682,7 +2683,7 @@ wasabi = ">=0.8.1,<1.1.0" [[package]] name = "sqlalchemy" -version = "1.4.27" +version = "1.4.29" description = "Database Abstraction Library" category = "main" optional = false @@ -2786,7 +2787,7 @@ python-versions = ">=3.6" [[package]] name = "tensorboard-plugin-wit" -version = "1.8.0" +version = "1.8.1" description = "What-If Tool TensorBoard plugin." 
category = "main" optional = false @@ -2907,11 +2908,11 @@ python-versions = "*" [[package]] name = "terminaltables" -version = "3.1.0" +version = "3.1.10" description = "Generate simple tables in terminals from a nested list of strings." category = "main" optional = false -python-versions = "*" +python-versions = ">=2.6" [[package]] name = "thinc" @@ -3077,7 +3078,7 @@ python-versions = "*" [[package]] name = "typeguard" -version = "2.13.2" +version = "2.13.3" description = "Run-time type checker for Python" category = "main" optional = false @@ -3122,7 +3123,7 @@ python-versions = "*" [[package]] name = "types-pytz" -version = "2021.3.1" +version = "2021.3.4" description = "Typing stubs for pytz" category = "dev" optional = false @@ -3130,20 +3131,31 @@ python-versions = "*" [[package]] name = "types-requests" -version = "2.26.1" +version = "2.27.5" description = "Typing stubs for requests" category = "dev" optional = false python-versions = "*" +[package.dependencies] +types-urllib3 = "<1.27" + [[package]] name = "types-setuptools" -version = "57.4.4" +version = "57.4.7" description = "Typing stubs for setuptools" category = "dev" optional = false python-versions = "*" +[[package]] +name = "types-urllib3" +version = "1.26.4" +description = "Typing stubs for urllib3" +category = "dev" +optional = false +python-versions = "*" + [[package]] name = "typing-extensions" version = "3.7.4.3" @@ -3192,7 +3204,7 @@ python-versions = ">=3.6" [[package]] name = "urllib3" -version = "1.26.7" +version = "1.26.8" description = "HTTP library with thread-safe connection pooling, file post, and more." category = "main" optional = false @@ -3302,15 +3314,15 @@ typing-extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} [[package]] name = "zipp" -version = "3.6.0" +version = "3.7.0" description = "Backport of pathlib-compatible object wrapper for zip files" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.extras] docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] [extras] full = ["spacy", "transformers", "jieba"] @@ -3322,7 +3334,7 @@ transformers = ["transformers"] [metadata] lock-version = "1.1" python-versions = ">=3.7,<3.9" -content-hash = "8cddaa2a91c9f3f1b5914a022b0edfe1c50ffcaca113bc203f9161feb718378b" +content-hash = "8174efe1009da64964e0bcde91c9f737ca4e3b30a113d7cabef6837d09d63d40" [metadata.files] absl-py = [ @@ -3330,8 +3342,8 @@ absl-py = [ {file = "absl_py-0.13.0-py3-none-any.whl", hash = "sha256:62bd4e248ddb19d81aec8f9446b407ff37c8175c2ba88266a7afa9b4ce4a333b"}, ] aio-pika = [ - {file = "aio-pika-6.8.0.tar.gz", hash = "sha256:1d4305a5f78af3857310b4fe48348cdcf6c097e0e275ea88c2cd08570531a369"}, - {file = "aio_pika-6.8.0-py3-none-any.whl", hash = "sha256:e69afef8695f47c5d107bbdba21bdb845d5c249acb3be53ef5c2d497b02657c0"}, + {file = "aio-pika-6.8.1.tar.gz", hash = "sha256:c2b2b46949a34252ff0e64c3bc208eef1893e5791b51aeefabf1676788d56b66"}, + {file = "aio_pika-6.8.1-py3-none-any.whl", hash = "sha256:059ab8ecc03d73997f64ed28df7269105984232174d0e6406389c4e8ed30941c"}, ] aiofiles = [ {file = "aiofiles-0.8.0-py3-none-any.whl", hash = 
"sha256:7a973fc22b29e9962d0897805ace5856e6a566ab1f0c8e5c91ff6c866519c937"}, @@ -3377,16 +3389,16 @@ aiohttp = [ {file = "aiohttp-3.7.4.tar.gz", hash = "sha256:5d84ecc73141d0a0d61ece0742bb7ff5751b0657dab8405f899d3ceb104cc7de"}, ] aioresponses = [ - {file = "aioresponses-0.7.2-py2.py3-none-any.whl", hash = "sha256:2f8ff624543066eb465b0238de68d29231e8488f41dc4b5a9dae190982cdae50"}, - {file = "aioresponses-0.7.2.tar.gz", hash = "sha256:82e495d118b74896aa5b4d47e17effb5e2cc783e510ae395ceade5e87cabe89a"}, + {file = "aioresponses-0.7.3-py2.py3-none-any.whl", hash = "sha256:7b1897169062c92fa87d6ecc503ac566ac87fbfacb2504f8ca81c8035a2eb068"}, + {file = "aioresponses-0.7.3.tar.gz", hash = "sha256:2c64ed5710ee8cb4e958c569184dad12f4c9cd5939135cb38f88c6a8261cceb3"}, ] aiormq = [ {file = "aiormq-3.3.1-py3-none-any.whl", hash = "sha256:e584dac13a242589aaf42470fd3006cb0dc5aed6506cbd20357c7ec8bbe4a89e"}, {file = "aiormq-3.3.1.tar.gz", hash = "sha256:8218dd9f7198d6e7935855468326bbacf0089f926c70baa8dd92944cb2496573"}, ] anyio = [ - {file = "anyio-3.4.0-py3-none-any.whl", hash = "sha256:2855a9423524abcdd652d942f8932fda1735210f77a6b392eafd9ff34d3fe020"}, - {file = "anyio-3.4.0.tar.gz", hash = "sha256:24adc69309fb5779bc1e06158e143e0b6d2c56b302a3ac3de3083c705a6ed39d"}, + {file = "anyio-3.5.0-py3-none-any.whl", hash = "sha256:b5fa16c5ff93fa1046f2eeb5bbff2dad4d3514d6cda61d02816dba34fa8c3c2e"}, + {file = "anyio-3.5.0.tar.gz", hash = "sha256:a0aeffe2fb1fdf374a8e4b471444f0f3ac4fb9f5a5b542b48824475e0042a5a6"}, ] appdirs = [ {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, @@ -3417,8 +3429,8 @@ attrs = [ {file = "attrs-21.2.0.tar.gz", hash = "sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb"}, ] azure-core = [ - {file = "azure-core-1.21.0.zip", hash = "sha256:84c58cb1194c34bb6c4b4f41dcf0430c11d316b84a0b978b28879577c422c5b6"}, - {file = "azure_core-1.21.0-py2.py3-none-any.whl", hash = "sha256:875419a1d2a373ed596ab517871a53b1006a5322d091d3c0ae8df3f92b82bbfa"}, + {file = "azure-core-1.21.1.zip", hash = "sha256:88d2db5cf9a135a7287dc45fdde6b96f9ca62c9567512a3bb3e20e322ce7deb2"}, + {file = "azure_core-1.21.1-py2.py3-none-any.whl", hash = "sha256:3d70e9ec64de92dfae330c15bc69085caceb2d83813ef6c01cc45326f2a4be83"}, ] azure-storage-blob = [ {file = "azure-storage-blob-12.8.1.zip", hash = "sha256:eb37b50ddfb6e558b29f6c8c03b0666514e55d6170bf4624e7261a3af93c6401"}, @@ -3437,7 +3449,7 @@ black = [ {file = "black-19.10b0.tar.gz", hash = "sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539"}, ] blis = [ - {file = "blis-0.7.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:98eba77b1e1fde7813bc0453ab78b6ae2067f5bc0fe9e3abc671b2895cfecf33"}, + {file = "blis-0.7.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5812a7c04561ae7332cf730f57d9f82cbd12c5f86a5bfad66ee244e51d06266d"}, {file = "blis-0.7.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eecfce3d8fce61dede7b0ae0dffa461c22072437b6cde85587db0c1aa75b450"}, {file = "blis-0.7.5-cp310-cp310-win_amd64.whl", hash = "sha256:0e476931f0d5703a21c77e7f69b8ebdeeea493fc7858a86f627ac2b376a12c8d"}, {file = "blis-0.7.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:5966ddf3bce84aa7bb09ce4ca059309602fa63280a5d5e5365bb2a294bd5a138"}, @@ -3455,12 +3467,12 @@ blis = [ {file = "blis-0.7.5.tar.gz", hash = "sha256:833e01e9eaff4c01aa6e049bbc1e6acb9eca6ee513d7b35b5bf135d49705ad33"}, ] boto3 = [ - {file = "boto3-1.20.20-py3-none-any.whl", hash = 
"sha256:6c173ffaf0604e34d6865edf7a9a71e1b3e79bd441b8b465ca4b2d44f840806d"}, - {file = "boto3-1.20.20.tar.gz", hash = "sha256:2c5377b6ab74eeccccd16f0f21537ede87b05c8322b0ccc852a68f36ea6c16c9"}, + {file = "boto3-1.20.32-py3-none-any.whl", hash = "sha256:b427f103f7d1488ad360b665687a032741d0a0c9ebe6f1a40dcdda95b646859a"}, + {file = "boto3-1.20.32.tar.gz", hash = "sha256:2c7eacd3edd38ec89456841edecd9c29382372180b675e43e22d16df6a582a4a"}, ] botocore = [ - {file = "botocore-1.23.20-py3-none-any.whl", hash = "sha256:98275e47c941cada6507089ecfe91e420972209b1deeceaf55a89ea50d046347"}, - {file = "botocore-1.23.20.tar.gz", hash = "sha256:22e1c7b4b2b8b11d7001ca5ef2b41bda9a8be46fb3cb994a2948462666ac5ef1"}, + {file = "botocore-1.23.32-py3-none-any.whl", hash = "sha256:a33e6b30c5d64156d10ec6986fc96dfdf5c723ad7dc83e0d60af6407192b16b4"}, + {file = "botocore-1.23.32.tar.gz", hash = "sha256:49c39a4e0ece6e486aefa23d7c96c5f9a4c41a6ceab3cc41b157e8b5bea749e3"}, ] cachecontrol = [ {file = "CacheControl-0.12.10-py2.py3-none-any.whl", hash = "sha256:b0d43d8f71948ef5ebdee5fe236b86c6ffc7799370453dccb0e894c20dfa487c"}, @@ -3558,7 +3570,8 @@ colorama = [ {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"}, ] colorclass = [ - {file = "colorclass-2.2.0.tar.gz", hash = "sha256:b05c2a348dfc1aff2d502527d78a5b7b7e2f85da94a96c5081210d8e9ee8e18b"}, + {file = "colorclass-2.2.2-py2.py3-none-any.whl", hash = "sha256:6f10c273a0ef7a1150b1120b6095cbdd68e5cf36dfd5d0fc957a2500bbf99a55"}, + {file = "colorclass-2.2.2.tar.gz", hash = "sha256:6d4fe287766166a98ca7bc6f6312daf04a0481b1eda43e7173484051c0ab4366"}, ] coloredlogs = [ {file = "coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934"}, @@ -3647,7 +3660,7 @@ cycler = [ {file = "cycler-0.11.0.tar.gz", hash = "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"}, ] cymem = [ - {file = "cymem-2.0.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2b4e27e739f09f16c7c0190f962ffe60dab39cb6a229d5c13e274d16f46a17e8"}, + {file = "cymem-2.0.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:700540b68e96a7056d0691d467df2bbaaf0934a3e6fe2383669998cbee19580a"}, {file = "cymem-2.0.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:971cf0a8437dfb4185c3049c086e463612fe849efadc0f5cc153fc81c501da7d"}, {file = "cymem-2.0.6-cp310-cp310-win_amd64.whl", hash = "sha256:6b0d1a6b0a1296f31fa9e4b7ae5ea49394084ecc883b1ae6fec4844403c43468"}, {file = "cymem-2.0.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b8e1c18bb00800425576710468299153caad20c64ddb6819d40a6a34e21ee21c"}, @@ -3673,8 +3686,8 @@ dataclasses = [ {file = "dataclasses-0.6.tar.gz", hash = "sha256:6988bd2b895eef432d562370bb707d540f32f7360ab13da45340101bc2307d84"}, ] decorator = [ - {file = "decorator-5.1.0-py3-none-any.whl", hash = "sha256:7b12e7c3c6ab203a29e157335e9122cb03de9ab7264b137594103fd4a683b374"}, - {file = "decorator-5.1.0.tar.gz", hash = "sha256:e59913af105b9860aa2c8d3272d9de5a56a4e608db9a2f167a8480b323d529a7"}, + {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, + {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, ] deprecated = [ {file = "Deprecated-1.2.13-py2.py3-none-any.whl", hash = "sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d"}, @@ -3727,8 +3740,8 @@ fbmessenger = [ {file = 
"fbmessenger-6.0.0.tar.gz", hash = "sha256:6e42c4588a4c942547be228886278bbc7a084e0b34799c7e6ebd786129f021e6"}, ] filelock = [ - {file = "filelock-3.4.0-py3-none-any.whl", hash = "sha256:2e139a228bcf56dd8b2274a65174d005c4a6b68540ee0bdbb92c76f43f29f7e8"}, - {file = "filelock-3.4.0.tar.gz", hash = "sha256:93d512b32a23baf4cac44ffd72ccf70732aeff7b8050fcaf6d3ec406d954baf4"}, + {file = "filelock-3.4.2-py3-none-any.whl", hash = "sha256:cf0fc6a2f8d26bd900f19bf33915ca70ba4dd8c56903eeb14e1e7a2fd7590146"}, + {file = "filelock-3.4.2.tar.gz", hash = "sha256:38b4f4c989f9d06d44524df1b24bd19e167d851f19b50bf3e3559952dddc5b80"}, ] fire = [ {file = "fire-0.4.0.tar.gz", hash = "sha256:c5e2b8763699d1142393a46d0e3e790c5eb2f0706082df8f647878842c216a62"}, @@ -3769,12 +3782,12 @@ gitdb = [ {file = "github3.py-1.3.0.tar.gz", hash = "sha256:15a115c18f7bfcf934dfef7ab103844eb9f620c586bad65967708926da47cbda"}, ] gitpython = [ - {file = "GitPython-3.1.24-py3-none-any.whl", hash = "sha256:dc0a7f2f697657acc8d7f89033e8b1ea94dd90356b2983bca89dc8d2ab3cc647"}, - {file = "GitPython-3.1.24.tar.gz", hash = "sha256:df83fdf5e684fef7c6ee2c02fc68a5ceb7e7e759d08b694088d0cacb4eba59e5"}, + {file = "GitPython-3.1.26-py3-none-any.whl", hash = "sha256:26ac35c212d1f7b16036361ca5cff3ec66e11753a0d677fb6c48fa4e1a9dd8d6"}, + {file = "GitPython-3.1.26.tar.gz", hash = "sha256:fc8868f63a2e6d268fb25f481995ba185a85a66fcad126f039323ff6635669ee"}, ] google-api-core = [ - {file = "google-api-core-2.2.2.tar.gz", hash = "sha256:97349cc18c2bb2415f64f1353a80273a289a61294ce3eb2f7ce682d251bdd997"}, - {file = "google_api_core-2.2.2-py2.py3-none-any.whl", hash = "sha256:e7853735d4f51f4212d6bf9750620d76fc0106c0f271be0c3f43b73501c7ddf9"}, + {file = "google-api-core-2.3.2.tar.gz", hash = "sha256:c8889f45cf58deca522888ae1d39b2a25e93e7d1b019ae8cee6456d5c726a40c"}, + {file = "google_api_core-2.3.2-py2.py3-none-any.whl", hash = "sha256:3c562d393aed7e3d2011fcd1f103b490c411dcf5644b6312ca11a166a6ea8faf"}, ] google-auth = [ {file = "google-auth-2.3.3.tar.gz", hash = "sha256:d83570a664c10b97a1dc6f8df87e5fdfff012f48f62be131e449c20dfc32630e"}, @@ -3789,8 +3802,8 @@ google-cloud-core = [ {file = "google_cloud_core-2.2.1-py2.py3-none-any.whl", hash = "sha256:ab6cee07791afe4e210807ceeab749da6a076ab16d496ac734bf7e6ffea27486"}, ] google-cloud-storage = [ - {file = "google-cloud-storage-1.43.0.tar.gz", hash = "sha256:f3b4f4be5c8a1b5727a8f7136c94d3bacdd4b7bf11f9553f51ae4c1d876529d3"}, - {file = "google_cloud_storage-1.43.0-py2.py3-none-any.whl", hash = "sha256:bb3e4088054d50616bd57e4b81bb158db804c91faed39279d666e2fd07d2c118"}, + {file = "google-cloud-storage-1.44.0.tar.gz", hash = "sha256:29edbfeedd157d853049302bf5d104055c6f0cb7ef283537da3ce3f730073001"}, + {file = "google_cloud_storage-1.44.0-py2.py3-none-any.whl", hash = "sha256:cd4a223e9c18d771721a85c98a9c01b97d257edddff833ba63b7b1f0b9b4d6e9"}, ] google-crc32c = [ {file = "google-crc32c-1.3.0.tar.gz", hash = "sha256:276de6273eb074a35bc598f8efbc00c7869c5cf2e29c90748fccc8c898c244df"}, @@ -3847,8 +3860,8 @@ google-resumable-media = [ {file = "google_resumable_media-2.1.0-py2.py3-none-any.whl", hash = "sha256:cdc75ea0361e39704dc7df7da59fbd419e73c8bc92eac94d8a020d36baa9944b"}, ] googleapis-common-protos = [ - {file = "googleapis-common-protos-1.53.0.tar.gz", hash = "sha256:a88ee8903aa0a81f6c3cec2d5cf62d3c8aa67c06439b0496b49048fb1854ebf4"}, - {file = "googleapis_common_protos-1.53.0-py2.py3-none-any.whl", hash = "sha256:f6d561ab8fb16b30020b940e2dd01cd80082f4762fa9f3ee670f4419b4b8dbd0"}, + {file = 
"googleapis-common-protos-1.54.0.tar.gz", hash = "sha256:a4031d6ec6c2b1b6dc3e0be7e10a1bd72fb0b18b07ef9be7b51f2c1004ce2437"}, + {file = "googleapis_common_protos-1.54.0-py2.py3-none-any.whl", hash = "sha256:e54345a2add15dc5e1a7891c27731ff347b4c33765d79b5ed7026a6c0c7cbcae"}, ] greenlet = [ {file = "greenlet-1.1.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:58df5c2a0e293bf665a51f8a100d3e9956febfbf1d9aaf8c0677cf70218910c6"}, @@ -3903,50 +3916,50 @@ greenlet = [ {file = "greenlet-1.1.2.tar.gz", hash = "sha256:e30f5ea4ae2346e62cedde8794a56858a67b878dd79f7df76a0767e356b1744a"}, ] grpcio = [ - {file = "grpcio-1.42.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:6e5eec67909795f7b1ff2bd941bd6c2661ca5217ea9c35003d73314100786f60"}, - {file = "grpcio-1.42.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:8e8cd9909fdd232ecffb954936fd90c935ebe0b5fce36c88813f8247ce54019c"}, - {file = "grpcio-1.42.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:b4d7115ee08a36f3f50a6233bd78280e40847e078d2a5bb39c0ab0db4490d58f"}, - {file = "grpcio-1.42.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b781f412546830be55644f7c48251d30860f4725981862d4a1ea322f80d9cd34"}, - {file = "grpcio-1.42.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e62140c46d8125927c673c72c960cb387c02b2a1a3c6985a8b0a3914d27c0018"}, - {file = "grpcio-1.42.0-cp310-cp310-win32.whl", hash = "sha256:6b69726d7bbb633133c1b0d780b1357aa9b7a7f714fead6470bab1feb8012806"}, - {file = "grpcio-1.42.0-cp310-cp310-win_amd64.whl", hash = "sha256:d6c0b159b38fcc3bbc3331105197c1f58ac0d294eb83910d136a325a85def88f"}, - {file = "grpcio-1.42.0-cp36-cp36m-linux_armv7l.whl", hash = "sha256:53e10d07e541073eb9a84d49ecffb831c3cbb970bcd8cd8de8431e935bf66c2e"}, - {file = "grpcio-1.42.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:7a3c9b8e13365529f9426d4754085e8a9c2ad718a41a46a97e4e30e87bb45eae"}, - {file = "grpcio-1.42.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:66f910b6324ae69625e63db2eb29d833c307cfa36736fe13d2f841656c5f658f"}, - {file = "grpcio-1.42.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:59163b8d2e0d85f0ecbee52b348f867eec7e0f909177564fb3956363f7e616e5"}, - {file = "grpcio-1.42.0-cp36-cp36m-manylinux_2_17_aarch64.whl", hash = "sha256:d92c1721c7981812d0f42dfc8248b15d3b6a2ea79eb8870776364423de2aa245"}, - {file = "grpcio-1.42.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65720d2bf05e2b78c4bffe372f13c41845bae5658fd3f5dd300c374dd240e5cb"}, - {file = "grpcio-1.42.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f385e40846ff81d1c6dce98dcc192c7988a46540758804c4a2e6da5a0e3e3e05"}, - {file = "grpcio-1.42.0-cp36-cp36m-win32.whl", hash = "sha256:ea3560ffbfe08327024380508190103937fef25e355d2259f8b5c003a0732f55"}, - {file = "grpcio-1.42.0-cp36-cp36m-win_amd64.whl", hash = "sha256:29fc36c99161ff307c8ca438346b2e36f81dac5ecdbabc983d0b255d7913fb19"}, - {file = "grpcio-1.42.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:76b5fa4c6d88f804456e763461cf7a1db38b200669f1ba00c579014ab5aa7965"}, - {file = "grpcio-1.42.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:d1451a8c0c01c5b5fdfeb8f777820cb277fb5d797d972f57a41bb82483c44a79"}, - {file = "grpcio-1.42.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:6655df5f31664bac4cd6c9b52f389fd92cd10025504ad83685038f47e11e29d8"}, - {file = "grpcio-1.42.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:5b9f0c4822e3a52a1663a315752c6bbdbed0ec15a660d3e64137335acbb5b7ce"}, - {file = 
"grpcio-1.42.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:7742606ac2bc03ed10360f4f630e0cc01dce864fe63557254e9adea21bb51416"}, - {file = "grpcio-1.42.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:603d71de14ab1f1fd1254b69ceda73545943461b1f51f82fda9477503330b6ea"}, - {file = "grpcio-1.42.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d08ce780bbd8d1a442d855bd681ed0f7483c65d2c8ed83701a9ea4f13678411f"}, - {file = "grpcio-1.42.0-cp37-cp37m-win32.whl", hash = "sha256:2aba7f93671ec971c5c70db81633b49a2f974aa09a2d811aede344a32bad1896"}, - {file = "grpcio-1.42.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2956da789d74fc35d2c869b3aa45dbf41c5d862c056ca8b5e35a688347ede809"}, - {file = "grpcio-1.42.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:21aa4a111b3381d3dd982a3df62348713b29f651aa9f6dfbc9415adbfe28d2ba"}, - {file = "grpcio-1.42.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:a6f9ed5320b93c029615b75f6c8caf2c76aa6545d8845f3813908892cfc5f84e"}, - {file = "grpcio-1.42.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:3a13953e12dc40ee247b5fe6ef22b5fac8f040a76b814a11bf9f423e82402f28"}, - {file = "grpcio-1.42.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:f721b42a20d886c03d9b1f461b228cdaf02ccf6c4550e263f7fd3ce3ff19a8f1"}, - {file = "grpcio-1.42.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:e2d9c6690d4c88cd51ee395d7ba5bd1d26d7c37e94cb59e7fd62ff21ecaf891d"}, - {file = "grpcio-1.42.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d7f66eb220898787d7821a7931e35ae2512ed74f79f75adcd7ea2fb3119ca87d"}, - {file = "grpcio-1.42.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2e3f250e5398bf474c6e140df1b67494bf1e31c5277b5bf93841a564cbc22d0"}, - {file = "grpcio-1.42.0-cp38-cp38-win32.whl", hash = "sha256:06d5364e85e0fa50ee68bffd9c93a6aff869a91c68f1fd7ba1b944e063a0ff9f"}, - {file = "grpcio-1.42.0-cp38-cp38-win_amd64.whl", hash = "sha256:d58b3774ee2084c31aad140586a42e18328d9823959ca006a0b85ad7937fe405"}, - {file = "grpcio-1.42.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:b74bbac7e039cf23ed0c8edd820c31e90a52a22e28a03d45274a0956addde8d2"}, - {file = "grpcio-1.42.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2b264cf303a22c46f8d455f42425c546ad6ce22f183debb8d64226ddf1e039f4"}, - {file = "grpcio-1.42.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:64f2b3e6474e2ad865478b64f0850d15842acbb2623de5f78a60ceabe00c63e0"}, - {file = "grpcio-1.42.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:bf916ee93ea2fd52b5286ed4e19cbbde5e82754914379ea91dc5748550df3b4e"}, - {file = "grpcio-1.42.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:6ef72f0abdb89fb7c366a99e04823ecae5cda9f762f2234f42fc280447277cd6"}, - {file = "grpcio-1.42.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47ab65be9ba7a0beee94bbe2fb1dd03cb7832db9df4d1f8fae215a16b3edeb5e"}, - {file = "grpcio-1.42.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0209f30741de1875413f40e89bec9c647e7afad4a3549a6a1682c1ee23da68ca"}, - {file = "grpcio-1.42.0-cp39-cp39-win32.whl", hash = "sha256:5441d343602ce10ba48fcb36bb5de197a15a01dc9ee0f71c2a73cd5cd3d7f5ac"}, - {file = "grpcio-1.42.0-cp39-cp39-win_amd64.whl", hash = "sha256:17433f7eb01737240581b33fbc2eae7b7fa6d3429571782580bceaf05ec56cb8"}, - {file = "grpcio-1.42.0.tar.gz", hash = "sha256:4a8f2c7490fe3696e0cdd566e2f099fb91b51bc75446125175c55581c2f7bc11"}, + {file = "grpcio-1.43.0-cp310-cp310-linux_armv7l.whl", hash = 
"sha256:a4e786a8ee8b30b25d70ee52cda6d1dbba2a8ca2f1208d8e20ed8280774f15c8"}, + {file = "grpcio-1.43.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:af9c3742f6c13575c0d4147a8454da0ff5308c4d9469462ff18402c6416942fe"}, + {file = "grpcio-1.43.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:fdac966699707b5554b815acc272d81e619dd0999f187cd52a61aef075f870ee"}, + {file = "grpcio-1.43.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e463b4aa0a6b31cf2e57c4abc1a1b53531a18a570baeed39d8d7b65deb16b7e"}, + {file = "grpcio-1.43.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f11d05402e0ac3a284443d8a432d3dfc76a6bd3f7b5858cddd75617af2d7bd9b"}, + {file = "grpcio-1.43.0-cp310-cp310-win32.whl", hash = "sha256:c36f418c925a41fccada8f7ae9a3d3e227bfa837ddbfddd3d8b0ac252d12dda9"}, + {file = "grpcio-1.43.0-cp310-cp310-win_amd64.whl", hash = "sha256:772b943f34374744f70236bbbe0afe413ed80f9ae6303503f85e2b421d4bca92"}, + {file = "grpcio-1.43.0-cp36-cp36m-linux_armv7l.whl", hash = "sha256:cbc9b83211d905859dcf234ad39d7193ff0f05bfc3269c364fb0d114ee71de59"}, + {file = "grpcio-1.43.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:fb7229fa2a201a0c377ff3283174ec966da8f9fd7ffcc9a92f162d2e7fc9025b"}, + {file = "grpcio-1.43.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:17b75f220ee6923338155b4fcef4c38802b9a57bc57d112c9599a13a03e99f8d"}, + {file = "grpcio-1.43.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:6620a5b751b099b3b25553cfc03dfcd873cda06f9bb2ff7e9948ac7090e20f05"}, + {file = "grpcio-1.43.0-cp36-cp36m-manylinux_2_17_aarch64.whl", hash = "sha256:1898f999383baac5fcdbdef8ea5b1ef204f38dc211014eb6977ac6e55944d738"}, + {file = "grpcio-1.43.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47b6821238d8978014d23b1132713dac6c2d72cbb561cf257608b1673894f90a"}, + {file = "grpcio-1.43.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80398e9fb598060fa41050d1220f5a2440fe74ff082c36dda41ac3215ebb5ddd"}, + {file = "grpcio-1.43.0-cp36-cp36m-win32.whl", hash = "sha256:0110310eff07bb69782f53b7a947490268c4645de559034c43c0a635612e250f"}, + {file = "grpcio-1.43.0-cp36-cp36m-win_amd64.whl", hash = "sha256:45401d00f2ee46bde75618bf33e9df960daa7980e6e0e7328047191918c98504"}, + {file = "grpcio-1.43.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:af78ac55933811e6a25141336b1f2d5e0659c2f568d44d20539b273792563ca7"}, + {file = "grpcio-1.43.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8b2b9dc4d7897566723b77422e11c009a0ebd397966b165b21b89a62891a9fdf"}, + {file = "grpcio-1.43.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:77ef653f966934b3bfdd00e4f2064b68880eb40cf09b0b99edfa5ee22a44f559"}, + {file = "grpcio-1.43.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:e95b5d62ec26d0cd0b90c202d73e7cb927c369c3358e027225239a4e354967dc"}, + {file = "grpcio-1.43.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:04239e8f71db832c26bbbedb4537b37550a39d77681d748ab4678e58dd6455d6"}, + {file = "grpcio-1.43.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b4a7152187a49767a47d1413edde2304c96f41f7bc92cc512e230dfd0fba095"}, + {file = "grpcio-1.43.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8cc936a29c65ab39714e1ba67a694c41218f98b6e2a64efb83f04d9abc4386b"}, + {file = "grpcio-1.43.0-cp37-cp37m-win32.whl", hash = "sha256:577e024c8dd5f27cd98ba850bc4e890f07d4b5942e5bc059a3d88843a2f48f66"}, + {file = "grpcio-1.43.0-cp37-cp37m-win_amd64.whl", hash = 
"sha256:138f57e3445d4a48d9a8a5af1538fdaafaa50a0a3c243f281d8df0edf221dc02"}, + {file = "grpcio-1.43.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:08cf25f2936629db062aeddbb594bd76b3383ab0ede75ef0461a3b0bc3a2c150"}, + {file = "grpcio-1.43.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:01f4b887ed703fe82ebe613e1d2dadea517891725e17e7a6134dcd00352bd28c"}, + {file = "grpcio-1.43.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:0aa8285f284338eb68962fe1a830291db06f366ea12f213399b520c062b01f65"}, + {file = "grpcio-1.43.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:0edbfeb6729aa9da33ce7e28fb7703b3754934115454ae45e8cc1db601756fd3"}, + {file = "grpcio-1.43.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:c354017819201053d65212befd1dcb65c2d91b704d8977e696bae79c47cd2f82"}, + {file = "grpcio-1.43.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50cfb7e1067ee5e00b8ab100a6b7ea322d37ec6672c0455106520b5891c4b5f5"}, + {file = "grpcio-1.43.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57f1aeb65ed17dfb2f6cd717cc109910fe395133af7257a9c729c0b9604eac10"}, + {file = "grpcio-1.43.0-cp38-cp38-win32.whl", hash = "sha256:fa26a8bbb3fe57845acb1329ff700d5c7eaf06414c3e15f4cb8923f3a466ef64"}, + {file = "grpcio-1.43.0-cp38-cp38-win_amd64.whl", hash = "sha256:ade8b79a6b6aea68adb9d4bfeba5d647667d842202c5d8f3ba37ac1dc8e5c09c"}, + {file = "grpcio-1.43.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:124e718faf96fe44c98b05f3f475076be8b5198bb4c52a13208acf88a8548ba9"}, + {file = "grpcio-1.43.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2f96142d0abc91290a63ba203f01649e498302b1b6007c67bad17f823ecde0cf"}, + {file = "grpcio-1.43.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:31e6e489ccd8f08884b9349a39610982df48535881ec34f05a11c6e6b6ebf9d0"}, + {file = "grpcio-1.43.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:0e731f660e1e68238f56f4ce11156f02fd06dc58bc7834778d42c0081d4ef5ad"}, + {file = "grpcio-1.43.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:1f16725a320460435a8a5339d8b06c4e00d307ab5ad56746af2e22b5f9c50932"}, + {file = "grpcio-1.43.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4b4543e13acb4806917d883d0f70f21ba93b29672ea81f4aaba14821aaf9bb0"}, + {file = "grpcio-1.43.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:594aaa0469f4fca7773e80d8c27bf1298e7bbce5f6da0f084b07489a708f16ab"}, + {file = "grpcio-1.43.0-cp39-cp39-win32.whl", hash = "sha256:5449ae564349e7a738b8c38583c0aad954b0d5d1dd3cea68953bfc32eaee11e3"}, + {file = "grpcio-1.43.0-cp39-cp39-win_amd64.whl", hash = "sha256:bdf41550815a831384d21a498b20597417fd31bd084deb17d31ceb39ad9acc79"}, + {file = "grpcio-1.43.0.tar.gz", hash = "sha256:735d9a437c262ab039d02defddcb9f8f545d7009ae61c0114e19dda3843febe5"}, ] h11 = [ {file = "h11-0.12.0-py3-none-any.whl", hash = "sha256:36a3cb8c0a032f56e2da7084577878a035d3b61d104230d4bd49c0c6b555a9c6"}, @@ -4010,8 +4023,8 @@ idna = [ {file = "idna-2.10.tar.gz", hash = "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6"}, ] importlib-metadata = [ - {file = "importlib_metadata-4.8.2-py3-none-any.whl", hash = "sha256:53ccfd5c134223e497627b9815d5030edf77d2ed573922f7a0b8f8bb81a1c100"}, - {file = "importlib_metadata-4.8.2.tar.gz", hash = "sha256:75bdec14c397f528724c1bfd9709d660b33a4d2e77387a3358f20b848bb5e5fb"}, + {file = "importlib_metadata-4.10.0-py3-none-any.whl", hash = "sha256:b7cf7d3fef75f1e4c80a96ca660efbd51473d7e8f39b5ab9210febc7809012a4"}, + {file = 
"importlib_metadata-4.10.0.tar.gz", hash = "sha256:92a8b58ce734b2a4494878e0ecf7d79ccd7a128b5fc6014c401e0b61f006f0f6"}, ] incremental = [ {file = "incremental-21.3.0-py2.py3-none-any.whl", hash = "sha256:92014aebc6a20b78a8084cdd5645eeaa7f74b8933f70fa3ada2cfbd1e3b54321"}, @@ -4026,8 +4039,8 @@ ipaddress = [ {file = "ipaddress-1.0.23.tar.gz", hash = "sha256:b7f8e0369580bb4a24d5ba1d7cc29660a4a6987763faf1d8a8046830e020e7e2"}, ] isodate = [ - {file = "isodate-0.6.0-py2.py3-none-any.whl", hash = "sha256:aa4d33c06640f5352aca96e4b81afd8ab3b47337cc12089822d6f322ac772c81"}, - {file = "isodate-0.6.0.tar.gz", hash = "sha256:2e364a3d5759479cdb2d37cce6b9376ea504db2ff90252a2e5b7cc89cc9ff2d8"}, + {file = "isodate-0.6.1-py2.py3-none-any.whl", hash = "sha256:0751eece944162659049d35f4f549ed815792b38793f07cf73381c1c87cbed96"}, + {file = "isodate-0.6.1.tar.gz", hash = "sha256:48c5881de7e8b0a0d648cb024c8062dc84e7b840ed81e864c7614fd3c127bde9"}, ] jieba = [ {file = "jieba-0.42.1.tar.gz", hash = "sha256:055ca12f62674fafed09427f176506079bc135638a14e23e25be909131928db2"}, @@ -4238,8 +4251,8 @@ mongomock = [ {file = "mongomock-3.23.0.tar.gz", hash = "sha256:d9945e7c87c221aed47c6c10708376351a5f5ee48060943c56ba195be425b0dd"}, ] moto = [ - {file = "moto-2.2.17-py2.py3-none-any.whl", hash = "sha256:73aa14a650cb3bf02ca720b343618a57dda4c2c1d1166708a4c5c98ea9013b29"}, - {file = "moto-2.2.17.tar.gz", hash = "sha256:221ebd16b41b3ae157554ca5e540a8c1b4b1c93443cbf854c1f04751194c51b6"}, + {file = "moto-2.2.20-py2.py3-none-any.whl", hash = "sha256:20f06e51dbdeb3ed76989f733e4e51472cbfe39dda031a49da66fc8df1bcefa4"}, + {file = "moto-2.2.20.tar.gz", hash = "sha256:aa6479df35b47d4df0c3214e118c328df67d9f4e157ec5523ce87a23fecf93f6"}, ] msgpack = [ {file = "msgpack-1.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:96acc674bb9c9be63fa8b6dabc3248fdc575c4adc005c440ad02f87ca7edd079"}, @@ -4356,7 +4369,7 @@ multidict = [ {file = "multidict-5.2.0.tar.gz", hash = "sha256:0dd1c93edb444b33ba2274b66f63def8a327d607c6c790772f448a53b6ea59ce"}, ] murmurhash = [ - {file = "murmurhash-1.0.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a814d559afe2a97ad40accf21ce96e8b04a3ff5a08f80c02b7acd427dbb7d567"}, + {file = "murmurhash-1.0.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1431d817e1fff1ed35f8dc54dd5b4d70165ec98076de8aca351805f8037293f3"}, {file = "murmurhash-1.0.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c7b8cc4a8db1c821b80f8ca70a25c3166b14d68ecef8693a117c6a0b1d74ace"}, {file = "murmurhash-1.0.6-cp310-cp310-win_amd64.whl", hash = "sha256:e40790fdaf65213d70da4ed9229f16f6d6376310dc8fc23eacc98e6151c6ae7e"}, {file = "murmurhash-1.0.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a78d53f047c3410ce4c589d9b47090f628f844ed5694418144e63cfe7f3da7e9"}, @@ -4518,54 +4531,45 @@ pep440-version-utils = [ {file = "pep440_version_utils-0.3.0-py3-none-any.whl", hash = "sha256:73780b2c31adad5ca35c89eb008f51c2a47aee0318debe31391b673b90577e1b"}, ] pillow = [ - {file = "Pillow-8.4.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:81f8d5c81e483a9442d72d182e1fb6dcb9723f289a57e8030811bac9ea3fef8d"}, - {file = "Pillow-8.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3f97cfb1e5a392d75dd8b9fd274d205404729923840ca94ca45a0af57e13dbe6"}, - {file = "Pillow-8.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb9fc393f3c61f9054e1ed26e6fe912c7321af2f41ff49d3f83d05bacf22cc78"}, - {file = 
"Pillow-8.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d82cdb63100ef5eedb8391732375e6d05993b765f72cb34311fab92103314649"}, - {file = "Pillow-8.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62cc1afda735a8d109007164714e73771b499768b9bb5afcbbee9d0ff374b43f"}, - {file = "Pillow-8.4.0-cp310-cp310-win32.whl", hash = "sha256:e3dacecfbeec9a33e932f00c6cd7996e62f53ad46fbe677577394aaa90ee419a"}, - {file = "Pillow-8.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:620582db2a85b2df5f8a82ddeb52116560d7e5e6b055095f04ad828d1b0baa39"}, - {file = "Pillow-8.4.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:1bc723b434fbc4ab50bb68e11e93ce5fb69866ad621e3c2c9bdb0cd70e345f55"}, - {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72cbcfd54df6caf85cc35264c77ede902452d6df41166010262374155947460c"}, - {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70ad9e5c6cb9b8487280a02c0ad8a51581dcbbe8484ce058477692a27c151c0a"}, - {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25a49dc2e2f74e65efaa32b153527fc5ac98508d502fa46e74fa4fd678ed6645"}, - {file = "Pillow-8.4.0-cp36-cp36m-win32.whl", hash = "sha256:93ce9e955cc95959df98505e4608ad98281fff037350d8c2671c9aa86bcf10a9"}, - {file = "Pillow-8.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:2e4440b8f00f504ee4b53fe30f4e381aae30b0568193be305256b1462216feff"}, - {file = "Pillow-8.4.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8c803ac3c28bbc53763e6825746f05cc407b20e4a69d0122e526a582e3b5e153"}, - {file = "Pillow-8.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8a17b5d948f4ceeceb66384727dde11b240736fddeda54ca740b9b8b1556b29"}, - {file = "Pillow-8.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1394a6ad5abc838c5cd8a92c5a07535648cdf6d09e8e2d6df916dfa9ea86ead8"}, - {file = "Pillow-8.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:792e5c12376594bfcb986ebf3855aa4b7c225754e9a9521298e460e92fb4a488"}, - {file = "Pillow-8.4.0-cp37-cp37m-win32.whl", hash = "sha256:d99ec152570e4196772e7a8e4ba5320d2d27bf22fdf11743dd882936ed64305b"}, - {file = "Pillow-8.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:7b7017b61bbcdd7f6363aeceb881e23c46583739cb69a3ab39cb384f6ec82e5b"}, - {file = "Pillow-8.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:d89363f02658e253dbd171f7c3716a5d340a24ee82d38aab9183f7fdf0cdca49"}, - {file = "Pillow-8.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0a0956fdc5defc34462bb1c765ee88d933239f9a94bc37d132004775241a7585"}, - {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b7bb9de00197fb4261825c15551adf7605cf14a80badf1761d61e59da347779"}, - {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72b9e656e340447f827885b8d7a15fc8c4e68d410dc2297ef6787eec0f0ea409"}, - {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5a4532a12314149d8b4e4ad8ff09dde7427731fcfa5917ff16d0291f13609df"}, - {file = "Pillow-8.4.0-cp38-cp38-win32.whl", hash = "sha256:82aafa8d5eb68c8463b6e9baeb4f19043bb31fefc03eb7b216b51e6a9981ae09"}, - {file = "Pillow-8.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:066f3999cb3b070a95c3652712cffa1a748cd02d60ad7b4e485c3748a04d9d76"}, - {file = "Pillow-8.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = 
"sha256:5503c86916d27c2e101b7f71c2ae2cddba01a2cf55b8395b0255fd33fa4d1f1a"}, - {file = "Pillow-8.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4acc0985ddf39d1bc969a9220b51d94ed51695d455c228d8ac29fcdb25810e6e"}, - {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b052a619a8bfcf26bd8b3f48f45283f9e977890263e4571f2393ed8898d331b"}, - {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:493cb4e415f44cd601fcec11c99836f707bb714ab03f5ed46ac25713baf0ff20"}, - {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8831cb7332eda5dc89b21a7bce7ef6ad305548820595033a4b03cf3091235ed"}, - {file = "Pillow-8.4.0-cp39-cp39-win32.whl", hash = "sha256:5e9ac5f66616b87d4da618a20ab0a38324dbe88d8a39b55be8964eb520021e02"}, - {file = "Pillow-8.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:3eb1ce5f65908556c2d8685a8f0a6e989d887ec4057326f6c22b24e8a172c66b"}, - {file = "Pillow-8.4.0-pp36-pypy36_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ddc4d832a0f0b4c52fff973a0d44b6c99839a9d016fe4e6a1cb8f3eea96479c2"}, - {file = "Pillow-8.4.0-pp36-pypy36_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a3e5ddc44c14042f0844b8cf7d2cd455f6cc80fd7f5eefbe657292cf601d9ad"}, - {file = "Pillow-8.4.0-pp36-pypy36_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c70e94281588ef053ae8998039610dbd71bc509e4acbc77ab59d7d2937b10698"}, - {file = "Pillow-8.4.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:3862b7256046fcd950618ed22d1d60b842e3a40a48236a5498746f21189afbbc"}, - {file = "Pillow-8.4.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4901622493f88b1a29bd30ec1a2f683782e57c3c16a2dbc7f2595ba01f639df"}, - {file = "Pillow-8.4.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84c471a734240653a0ec91dec0996696eea227eafe72a33bd06c92697728046b"}, - {file = "Pillow-8.4.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:244cf3b97802c34c41905d22810846802a3329ddcb93ccc432870243211c79fc"}, - {file = "Pillow-8.4.0.tar.gz", hash = "sha256:b8e2f83c56e141920c39464b852de3719dfbfb6e3c99a2d8da0edf4fb33176ed"}, + {file = "Pillow-9.0.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:113723312215b25c22df1fdf0e2da7a3b9c357a7d24a93ebbe80bfda4f37a8d4"}, + {file = "Pillow-9.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bb47a548cea95b86494a26c89d153fd31122ed65255db5dcbc421a2d28eb3379"}, + {file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31b265496e603985fad54d52d11970383e317d11e18e856971bdbb86af7242a4"}, + {file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d154ed971a4cc04b93a6d5b47f37948d1f621f25de3e8fa0c26b2d44f24e3e8f"}, + {file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80fe92813d208ce8aa7d76da878bdc84b90809f79ccbad2a288e9bcbeac1d9bd"}, + {file = "Pillow-9.0.0-cp310-cp310-win32.whl", hash = "sha256:d5dcea1387331c905405b09cdbfb34611050cc52c865d71f2362f354faee1e9f"}, + {file = "Pillow-9.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:52abae4c96b5da630a8b4247de5428f593465291e5b239f3f843a911a3cf0105"}, + {file = "Pillow-9.0.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:72c3110228944019e5f27232296c5923398496b28be42535e3b2dc7297b6e8b6"}, + {file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:97b6d21771da41497b81652d44191489296555b761684f82b7b544c49989110f"}, + {file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72f649d93d4cc4d8cf79c91ebc25137c358718ad75f99e99e043325ea7d56100"}, + {file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7aaf07085c756f6cb1c692ee0d5a86c531703b6e8c9cae581b31b562c16b98ce"}, + {file = "Pillow-9.0.0-cp37-cp37m-win32.whl", hash = "sha256:03b27b197deb4ee400ed57d8d4e572d2d8d80f825b6634daf6e2c18c3c6ccfa6"}, + {file = "Pillow-9.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a09a9d4ec2b7887f7a088bbaacfd5c07160e746e3d47ec5e8050ae3b2a229e9f"}, + {file = "Pillow-9.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:490e52e99224858f154975db61c060686df8a6b3f0212a678e5d2e2ce24675c9"}, + {file = "Pillow-9.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:500d397ddf4bbf2ca42e198399ac13e7841956c72645513e8ddf243b31ad2128"}, + {file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ebd8b9137630a7bbbff8c4b31e774ff05bbb90f7911d93ea2c9371e41039b52"}, + {file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd0e5062f11cb3e730450a7d9f323f4051b532781026395c4323b8ad055523c4"}, + {file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f3b4522148586d35e78313db4db0df4b759ddd7649ef70002b6c3767d0fdeb7"}, + {file = "Pillow-9.0.0-cp38-cp38-win32.whl", hash = "sha256:0b281fcadbb688607ea6ece7649c5d59d4bbd574e90db6cd030e9e85bde9fecc"}, + {file = "Pillow-9.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:b5050d681bcf5c9f2570b93bee5d3ec8ae4cf23158812f91ed57f7126df91762"}, + {file = "Pillow-9.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:c2067b3bb0781f14059b112c9da5a91c80a600a97915b4f48b37f197895dd925"}, + {file = "Pillow-9.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2d16b6196fb7a54aff6b5e3ecd00f7c0bab1b56eee39214b2b223a9d938c50af"}, + {file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98cb63ca63cb61f594511c06218ab4394bf80388b3d66cd61d0b1f63ee0ea69f"}, + {file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc462d24500ba707e9cbdef436c16e5c8cbf29908278af053008d9f689f56dee"}, + {file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3586e12d874ce2f1bc875a3ffba98732ebb12e18fb6d97be482bd62b56803281"}, + {file = "Pillow-9.0.0-cp39-cp39-win32.whl", hash = "sha256:68e06f8b2248f6dc8b899c3e7ecf02c9f413aab622f4d6190df53a78b93d97a5"}, + {file = "Pillow-9.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:6579f9ba84a3d4f1807c4aab4be06f373017fc65fff43498885ac50a9b47a553"}, + {file = "Pillow-9.0.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:47f5cf60bcb9fbc46011f75c9b45a8b5ad077ca352a78185bd3e7f1d294b98bb"}, + {file = "Pillow-9.0.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fd8053e1f8ff1844419842fd474fc359676b2e2a2b66b11cc59f4fa0a301315"}, + {file = "Pillow-9.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c5439bfb35a89cac50e81c751317faea647b9a3ec11c039900cd6915831064d"}, + {file = "Pillow-9.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:95545137fc56ce8c10de646074d242001a112a92de169986abd8c88c27566a05"}, + {file = "Pillow-9.0.0.tar.gz", hash = "sha256:ee6e2963e92762923956fe5d3479b1fdc3b76c83f290aad131a2f98c3df0593e"}, ] pluggy = [ {file = 
"pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, ] preshed = [ - {file = "preshed-3.0.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a9683730127658b531120b4ed5cff1f2a567318ab75e9ab0f22cc84ae1486c23"}, + {file = "preshed-3.0.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:66a71ced487516cf81fd0431a3a843514262ae2f33e9a7688b87562258fa75d5"}, {file = "preshed-3.0.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c98f725d8478f3ade4ab1ea00f50a92d2d9406d37276bc46fd8bab1d47452c4"}, {file = "preshed-3.0.6-cp310-cp310-win_amd64.whl", hash = "sha256:ea8aa9610837e907e8442e79300df0a861bfdb4dcaf026a5d9642a688ad04815"}, {file = "preshed-3.0.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e03ae3eee961106a517fcd827b5a7c51f7317236b3e665c989054ab8dc381d28"}, @@ -4588,98 +4592,124 @@ prompt-toolkit = [ {file = "prompt_toolkit-2.0.10.tar.gz", hash = "sha256:f15af68f66e664eaa559d4ac8a928111eebd5feda0c11738b5998045224829db"}, ] protobuf = [ - {file = "protobuf-3.19.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d80f80eb175bf5f1169139c2e0c5ada98b1c098e2b3c3736667f28cbbea39fc8"}, - {file = "protobuf-3.19.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:a529e7df52204565bcd33738a7a5f288f3d2d37d86caa5d78c458fa5fabbd54d"}, - {file = "protobuf-3.19.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28ccea56d4dc38d35cd70c43c2da2f40ac0be0a355ef882242e8586c6d66666f"}, - {file = "protobuf-3.19.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:8b30a7de128c46b5ecb343917d9fa737612a6e8280f440874e5cc2ba0d79b8f6"}, - {file = "protobuf-3.19.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5935c8ce02e3d89c7900140a8a42b35bc037ec07a6aeb61cc108be8d3c9438a6"}, - {file = "protobuf-3.19.1-cp36-cp36m-win32.whl", hash = "sha256:74f33edeb4f3b7ed13d567881da8e5a92a72b36495d57d696c2ea1ae0cfee80c"}, - {file = "protobuf-3.19.1-cp36-cp36m-win_amd64.whl", hash = "sha256:038daf4fa38a7e818dd61f51f22588d61755160a98db087a046f80d66b855942"}, - {file = "protobuf-3.19.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e51561d72efd5bd5c91490af1f13e32bcba8dab4643761eb7de3ce18e64a853"}, - {file = "protobuf-3.19.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:6e8ea9173403219239cdfd8d946ed101f2ab6ecc025b0fda0c6c713c35c9981d"}, - {file = "protobuf-3.19.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db3532d9f7a6ebbe2392041350437953b6d7a792de10e629c1e4f5a6b1fe1ac6"}, - {file = "protobuf-3.19.1-cp37-cp37m-win32.whl", hash = "sha256:615b426a177780ce381ecd212edc1e0f70db8557ed72560b82096bd36b01bc04"}, - {file = "protobuf-3.19.1-cp37-cp37m-win_amd64.whl", hash = "sha256:d8919368410110633717c406ab5c97e8df5ce93020cfcf3012834f28b1fab1ea"}, - {file = "protobuf-3.19.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:71b0250b0cfb738442d60cab68abc166de43411f2a4f791d31378590bfb71bd7"}, - {file = "protobuf-3.19.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:3cd0458870ea7d1c58e948ac8078f6ba8a7ecc44a57e03032ed066c5bb318089"}, - {file = "protobuf-3.19.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:655264ed0d0efe47a523e2255fc1106a22f6faab7cc46cfe99b5bae085c2a13e"}, - {file = "protobuf-3.19.1-cp38-cp38-win32.whl", hash = 
"sha256:b691d996c6d0984947c4cf8b7ae2fe372d99b32821d0584f0b90277aa36982d3"}, - {file = "protobuf-3.19.1-cp38-cp38-win_amd64.whl", hash = "sha256:e7e8d2c20921f8da0dea277dfefc6abac05903ceac8e72839b2da519db69206b"}, - {file = "protobuf-3.19.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fd390367fc211cc0ffcf3a9e149dfeca78fecc62adb911371db0cec5c8b7472d"}, - {file = "protobuf-3.19.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:d83e1ef8cb74009bebee3e61cc84b1c9cd04935b72bca0cbc83217d140424995"}, - {file = "protobuf-3.19.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36d90676d6f426718463fe382ec6274909337ca6319d375eebd2044e6c6ac560"}, - {file = "protobuf-3.19.1-cp39-cp39-win32.whl", hash = "sha256:e7b24c11df36ee8e0c085e5b0dc560289e4b58804746fb487287dda51410f1e2"}, - {file = "protobuf-3.19.1-cp39-cp39-win_amd64.whl", hash = "sha256:77d2fadcf369b3f22859ab25bd12bb8e98fb11e05d9ff9b7cd45b711c719c002"}, - {file = "protobuf-3.19.1-py2.py3-none-any.whl", hash = "sha256:e813b1c9006b6399308e917ac5d298f345d95bb31f46f02b60cd92970a9afa17"}, - {file = "protobuf-3.19.1.tar.gz", hash = "sha256:62a8e4baa9cb9e064eb62d1002eca820857ab2138440cb4b3ea4243830f94ca7"}, + {file = "protobuf-3.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1cb2ed66aac593adbf6dca4f07cd7ee7e2958b17bbc85b2cc8bc564ebeb258ec"}, + {file = "protobuf-3.19.3-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:898bda9cd37ec0c781b598891e86435de80c3bfa53eb483a9dac5a11ec93e942"}, + {file = "protobuf-3.19.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ad761ef3be34c8bdc7285bec4b40372a8dad9e70cfbdc1793cd3cf4c1a4ce74"}, + {file = "protobuf-3.19.3-cp310-cp310-win32.whl", hash = "sha256:2cddcbcc222f3144765ccccdb35d3621dc1544da57a9aca7e1944c1a4fe3db11"}, + {file = "protobuf-3.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:6202df8ee8457cb00810c6e76ced480f22a1e4e02c899a14e7b6e6e1de09f938"}, + {file = "protobuf-3.19.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:397d82f1c58b76445469c8c06b8dee1ff67b3053639d054f52599a458fac9bc6"}, + {file = "protobuf-3.19.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e54b8650e849ee8e95e481024bff92cf98f5ec61c7650cb838d928a140adcb63"}, + {file = "protobuf-3.19.3-cp36-cp36m-win32.whl", hash = "sha256:3bf3a07d17ba3511fe5fa916afb7351f482ab5dbab5afe71a7a384274a2cd550"}, + {file = "protobuf-3.19.3-cp36-cp36m-win_amd64.whl", hash = "sha256:afa8122de8064fd577f49ae9eef433561c8ace97a0a7b969d56e8b1d39b5d177"}, + {file = "protobuf-3.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:18c40a1b8721026a85187640f1786d52407dc9c1ba8ec38accb57a46e84015f6"}, + {file = "protobuf-3.19.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:af7238849fa79285d448a24db686517570099739527a03c9c2971cce99cc5ae2"}, + {file = "protobuf-3.19.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e765e6dfbbb02c55e4d6d1145743401a84fc0b508f5a81b2c5a738cf86353139"}, + {file = "protobuf-3.19.3-cp37-cp37m-win32.whl", hash = "sha256:c781402ed5396ab56358d7b866d78c03a77cbc26ba0598d8bb0ac32084b1a257"}, + {file = "protobuf-3.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:544fe9705189b249380fae07952d220c97f5c6c9372a6f936cc83a79601dcb70"}, + {file = "protobuf-3.19.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:84bf3aa3efb00dbe1c7ed55da0f20800b0662541e582d7e62b3e1464d61ed365"}, + {file = "protobuf-3.19.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:3f80a3491eaca767cdd86cb8660dc778f634b44abdb0dffc9b2a8e8d0cd617d0"}, + 
{file = "protobuf-3.19.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9401d96552befcc7311f5ef8f0fa7dba0ef5fd805466b158b141606cd0ab6a8"}, + {file = "protobuf-3.19.3-cp38-cp38-win32.whl", hash = "sha256:ef02d112c025e83db5d1188a847e358beab3e4bbfbbaf10eaf69e67359af51b2"}, + {file = "protobuf-3.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:1291a0a7db7d792745c99d4657b4c5c4942695c8b1ac1bfb993a34035ec123f7"}, + {file = "protobuf-3.19.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:49677e5e9c7ea1245a90c2e8a00d304598f22ea3aa0628f0e0a530a9e70665fa"}, + {file = "protobuf-3.19.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:df2ba379ee42427e8fcc6a0a76843bff6efb34ef5266b17f95043939b5e25b69"}, + {file = "protobuf-3.19.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2acd7ca329be544d1a603d5f13a4e34a3791c90d651ebaf130ba2e43ae5397c6"}, + {file = "protobuf-3.19.3-cp39-cp39-win32.whl", hash = "sha256:b53519b2ebec70cfe24b4ddda21e9843f0918d7c3627a785393fb35d402ab8ad"}, + {file = "protobuf-3.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:8ceaf5fdb72c8e1fcb7be9f2b3b07482ce058a3548180c0bdd5c7e4ac5e14165"}, + {file = "protobuf-3.19.3-py2.py3-none-any.whl", hash = "sha256:f6d4b5b7595a57e69eb7314c67bef4a3c745b4caf91accaf72913d8e0635111b"}, + {file = "protobuf-3.19.3.tar.gz", hash = "sha256:d975a6314fbf5c524d4981e24294739216b5fb81ef3c14b86fb4b045d6690907"}, ] psutil = [ - {file = "psutil-5.8.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:0066a82f7b1b37d334e68697faba68e5ad5e858279fd6351c8ca6024e8d6ba64"}, - {file = "psutil-5.8.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:0ae6f386d8d297177fd288be6e8d1afc05966878704dad9847719650e44fc49c"}, - {file = "psutil-5.8.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:12d844996d6c2b1d3881cfa6fa201fd635971869a9da945cf6756105af73d2df"}, - {file = "psutil-5.8.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:02b8292609b1f7fcb34173b25e48d0da8667bc85f81d7476584d889c6e0f2131"}, - {file = "psutil-5.8.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:6ffe81843131ee0ffa02c317186ed1e759a145267d54fdef1bc4ea5f5931ab60"}, - {file = "psutil-5.8.0-cp27-none-win32.whl", hash = "sha256:ea313bb02e5e25224e518e4352af4bf5e062755160f77e4b1767dd5ccb65f876"}, - {file = "psutil-5.8.0-cp27-none-win_amd64.whl", hash = "sha256:5da29e394bdedd9144c7331192e20c1f79283fb03b06e6abd3a8ae45ffecee65"}, - {file = "psutil-5.8.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:74fb2557d1430fff18ff0d72613c5ca30c45cdbfcddd6a5773e9fc1fe9364be8"}, - {file = "psutil-5.8.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:74f2d0be88db96ada78756cb3a3e1b107ce8ab79f65aa885f76d7664e56928f6"}, - {file = "psutil-5.8.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:99de3e8739258b3c3e8669cb9757c9a861b2a25ad0955f8e53ac662d66de61ac"}, - {file = "psutil-5.8.0-cp36-cp36m-win32.whl", hash = "sha256:36b3b6c9e2a34b7d7fbae330a85bf72c30b1c827a4366a07443fc4b6270449e2"}, - {file = "psutil-5.8.0-cp36-cp36m-win_amd64.whl", hash = "sha256:52de075468cd394ac98c66f9ca33b2f54ae1d9bff1ef6b67a212ee8f639ec06d"}, - {file = "psutil-5.8.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c6a5fd10ce6b6344e616cf01cc5b849fa8103fbb5ba507b6b2dee4c11e84c935"}, - {file = "psutil-5.8.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:61f05864b42fedc0771d6d8e49c35f07efd209ade09a5afe6a5059e7bb7bf83d"}, - {file = "psutil-5.8.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:0dd4465a039d343925cdc29023bb6960ccf4e74a65ad53e768403746a9207023"}, - {file = 
"psutil-5.8.0-cp37-cp37m-win32.whl", hash = "sha256:1bff0d07e76114ec24ee32e7f7f8d0c4b0514b3fae93e3d2aaafd65d22502394"}, - {file = "psutil-5.8.0-cp37-cp37m-win_amd64.whl", hash = "sha256:fcc01e900c1d7bee2a37e5d6e4f9194760a93597c97fee89c4ae51701de03563"}, - {file = "psutil-5.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6223d07a1ae93f86451d0198a0c361032c4c93ebd4bf6d25e2fb3edfad9571ef"}, - {file = "psutil-5.8.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:d225cd8319aa1d3c85bf195c4e07d17d3cd68636b8fc97e6cf198f782f99af28"}, - {file = "psutil-5.8.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:28ff7c95293ae74bf1ca1a79e8805fcde005c18a122ca983abf676ea3466362b"}, - {file = "psutil-5.8.0-cp38-cp38-win32.whl", hash = "sha256:ce8b867423291cb65cfc6d9c4955ee9bfc1e21fe03bb50e177f2b957f1c2469d"}, - {file = "psutil-5.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:90f31c34d25b1b3ed6c40cdd34ff122b1887a825297c017e4cbd6796dd8b672d"}, - {file = "psutil-5.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6323d5d845c2785efb20aded4726636546b26d3b577aded22492908f7c1bdda7"}, - {file = "psutil-5.8.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:245b5509968ac0bd179287d91210cd3f37add77dad385ef238b275bad35fa1c4"}, - {file = "psutil-5.8.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:90d4091c2d30ddd0a03e0b97e6a33a48628469b99585e2ad6bf21f17423b112b"}, - {file = "psutil-5.8.0-cp39-cp39-win32.whl", hash = "sha256:ea372bcc129394485824ae3e3ddabe67dc0b118d262c568b4d2602a7070afdb0"}, - {file = "psutil-5.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:f4634b033faf0d968bb9220dd1c793b897ab7f1189956e1aa9eae752527127d3"}, - {file = "psutil-5.8.0.tar.gz", hash = "sha256:0c9ccb99ab76025f2f0bbecf341d4656e9c1351db8cc8a03ccd62e318ab4b5c6"}, + {file = "psutil-5.9.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:55ce319452e3d139e25d6c3f85a1acf12d1607ddedea5e35fb47a552c051161b"}, + {file = "psutil-5.9.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:7336292a13a80eb93c21f36bde4328aa748a04b68c13d01dfddd67fc13fd0618"}, + {file = "psutil-5.9.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:cb8d10461c1ceee0c25a64f2dd54872b70b89c26419e147a05a10b753ad36ec2"}, + {file = "psutil-5.9.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:7641300de73e4909e5d148e90cc3142fb890079e1525a840cf0dfd39195239fd"}, + {file = "psutil-5.9.0-cp27-none-win32.whl", hash = "sha256:ea42d747c5f71b5ccaa6897b216a7dadb9f52c72a0fe2b872ef7d3e1eacf3ba3"}, + {file = "psutil-5.9.0-cp27-none-win_amd64.whl", hash = "sha256:ef216cc9feb60634bda2f341a9559ac594e2eeaadd0ba187a4c2eb5b5d40b91c"}, + {file = "psutil-5.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90a58b9fcae2dbfe4ba852b57bd4a1dded6b990a33d6428c7614b7d48eccb492"}, + {file = "psutil-5.9.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff0d41f8b3e9ebb6b6110057e40019a432e96aae2008951121ba4e56040b84f3"}, + {file = "psutil-5.9.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:742c34fff804f34f62659279ed5c5b723bb0195e9d7bd9907591de9f8f6558e2"}, + {file = "psutil-5.9.0-cp310-cp310-win32.whl", hash = "sha256:8293942e4ce0c5689821f65ce6522ce4786d02af57f13c0195b40e1edb1db61d"}, + {file = "psutil-5.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:9b51917c1af3fa35a3f2dabd7ba96a2a4f19df3dec911da73875e1edaf22a40b"}, + {file = "psutil-5.9.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:e9805fed4f2a81de98ae5fe38b75a74c6e6ad2df8a5c479594c7629a1fe35f56"}, + {file = "psutil-5.9.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c51f1af02334e4b516ec221ee26b8fdf105032418ca5a5ab9737e8c87dafe203"}, + {file = "psutil-5.9.0-cp36-cp36m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32acf55cb9a8cbfb29167cd005951df81b567099295291bcfd1027365b36591d"}, + {file = "psutil-5.9.0-cp36-cp36m-win32.whl", hash = "sha256:e5c783d0b1ad6ca8a5d3e7b680468c9c926b804be83a3a8e95141b05c39c9f64"}, + {file = "psutil-5.9.0-cp36-cp36m-win_amd64.whl", hash = "sha256:d62a2796e08dd024b8179bd441cb714e0f81226c352c802fca0fd3f89eeacd94"}, + {file = "psutil-5.9.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3d00a664e31921009a84367266b35ba0aac04a2a6cad09c550a89041034d19a0"}, + {file = "psutil-5.9.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7779be4025c540d1d65a2de3f30caeacc49ae7a2152108adeaf42c7534a115ce"}, + {file = "psutil-5.9.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:072664401ae6e7c1bfb878c65d7282d4b4391f1bc9a56d5e03b5a490403271b5"}, + {file = "psutil-5.9.0-cp37-cp37m-win32.whl", hash = "sha256:df2c8bd48fb83a8408c8390b143c6a6fa10cb1a674ca664954de193fdcab36a9"}, + {file = "psutil-5.9.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1d7b433519b9a38192dfda962dd8f44446668c009833e1429a52424624f408b4"}, + {file = "psutil-5.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c3400cae15bdb449d518545cbd5b649117de54e3596ded84aacabfbb3297ead2"}, + {file = "psutil-5.9.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b2237f35c4bbae932ee98902a08050a27821f8f6dfa880a47195e5993af4702d"}, + {file = "psutil-5.9.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1070a9b287846a21a5d572d6dddd369517510b68710fca56b0e9e02fd24bed9a"}, + {file = "psutil-5.9.0-cp38-cp38-win32.whl", hash = "sha256:76cebf84aac1d6da5b63df11fe0d377b46b7b500d892284068bacccf12f20666"}, + {file = "psutil-5.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:3151a58f0fbd8942ba94f7c31c7e6b310d2989f4da74fcbf28b934374e9bf841"}, + {file = "psutil-5.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:539e429da49c5d27d5a58e3563886057f8fc3868a5547b4f1876d9c0f007bccf"}, + {file = "psutil-5.9.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58c7d923dc209225600aec73aa2c4ae8ea33b1ab31bc11ef8a5933b027476f07"}, + {file = "psutil-5.9.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3611e87eea393f779a35b192b46a164b1d01167c9d323dda9b1e527ea69d697d"}, + {file = "psutil-5.9.0-cp39-cp39-win32.whl", hash = "sha256:4e2fb92e3aeae3ec3b7b66c528981fd327fb93fd906a77215200404444ec1845"}, + {file = "psutil-5.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:7d190ee2eaef7831163f254dc58f6d2e2a22e27382b936aab51c835fc080c3d3"}, + {file = "psutil-5.9.0.tar.gz", hash = "sha256:869842dbd66bb80c3217158e629d6fceaecc3a3166d3d1faee515b05dd26ca25"}, ] psycopg2-binary = [ - {file = "psycopg2-binary-2.9.2.tar.gz", hash = "sha256:234b1f48488b2f86aac04fb00cb04e5e9bcb960f34fa8a8e41b73149d581a93b"}, - {file = 
"psycopg2_binary-2.9.2-cp310-cp310-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:c0e1fb7097ded2cc44d9037cfc68ad86a30341261492e7de95d180e534969fb2"}, - {file = "psycopg2_binary-2.9.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:717525cdc97b23182ff6f470fb5bf6f0bc796b5a7000c6f6699d6679991e4a5e"}, - {file = "psycopg2_binary-2.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3865d0cd919349c45603bd7e80249a382c5ecf8106304cfd153282adf9684b6a"}, - {file = "psycopg2_binary-2.9.2-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:daf6b5c62eb738872d61a1fa740d7768904911ba5a7e055ed72169d379b58beb"}, - {file = "psycopg2_binary-2.9.2-cp310-cp310-manylinux_2_24_ppc64le.whl", hash = "sha256:3ac83656ff4fbe7f2a956ab085e3eb1d678df54759965d509bdd6a06ce520d49"}, - {file = "psycopg2_binary-2.9.2-cp310-cp310-win32.whl", hash = "sha256:a04cfa231e7d9b63639e62166a4051cb47ca599fa341463fa3e1c48585fcee64"}, - {file = "psycopg2_binary-2.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:c6e16e085fe6dc6c099ee0be56657aa9ad71027465ef9591d302ba230c404c7e"}, - {file = "psycopg2_binary-2.9.2-cp36-cp36m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:53912199abb626a7249c662e72b70b4f57bf37f840599cec68625171435790dd"}, - {file = "psycopg2_binary-2.9.2-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:029e09a892b9ebc3c77851f69ce0720e1b72a9c6850460cee49b14dfbf9ccdd2"}, - {file = "psycopg2_binary-2.9.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db1b03c189f85b8df29030ad32d521dd7dcb862fd5f8892035314f5b886e70ce"}, - {file = "psycopg2_binary-2.9.2-cp36-cp36m-manylinux_2_24_aarch64.whl", hash = "sha256:2eecbdc5fa5886f2dd6cc673ce4291cc0fb8900965315268960ad9c2477f8276"}, - {file = "psycopg2_binary-2.9.2-cp36-cp36m-manylinux_2_24_ppc64le.whl", hash = "sha256:a77e98c68b0e6c51d4d6a994d22b30e77276cbd33e4aabdde03b9ad3a2c148aa"}, - {file = "psycopg2_binary-2.9.2-cp36-cp36m-win32.whl", hash = "sha256:bf31e6fdb4ec1f6d98a07f48836508ed6edd19b48b13bbf168fbc1bd014b4ca2"}, - {file = "psycopg2_binary-2.9.2-cp36-cp36m-win_amd64.whl", hash = "sha256:f9c37ecb173d76cf49e519133fd70851b8f9c38b6b8c1cb7fcfc71368d4cc6fc"}, - {file = "psycopg2_binary-2.9.2-cp37-cp37m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:a507db7758953b1b170c4310691a1a89877029b1e11b08ba5fc8ae3ddb35596b"}, - {file = "psycopg2_binary-2.9.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e4bbcfb403221ea1953f3e0a85cef00ed15c1683a66cf35c956a7e37c33a4c4"}, - {file = "psycopg2_binary-2.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4dff0f15af6936c6fe6da7067b4216edbbe076ad8625da819cc066591b1133c"}, - {file = "psycopg2_binary-2.9.2-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:8d2aafe46eb87742425ece38130510fbb035787ee89a329af299029c4d9ae318"}, - {file = "psycopg2_binary-2.9.2-cp37-cp37m-manylinux_2_24_ppc64le.whl", hash = "sha256:37c8f00f7a2860bac9f7a54f03c243fc1dd9b367e5b2b52f5a02e5f4e9d8c49b"}, - {file = "psycopg2_binary-2.9.2-cp37-cp37m-win32.whl", hash = "sha256:ef97578fab5115e3af4334dd3376dea3c3a79328a3314b21ec7ced02920b916d"}, - {file = "psycopg2_binary-2.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7e6bd4f532c2cd297b81114526176b240109a1c52020adca69c3f3226c65dc18"}, - {file = 
"psycopg2_binary-2.9.2-cp38-cp38-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:eeee7b18c51d02e49bf1984d7af26e8843fe68e31fa1cbab5366ebdfa1c89ade"}, - {file = "psycopg2_binary-2.9.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:497372cc76e6cbce2f51b37be141f360a321423c03eb9be45524b1d123f4cd11"}, - {file = "psycopg2_binary-2.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5671699aff57d22a245b7f4bba89e3de97dc841c5e98bd7f685429b2b20eca47"}, - {file = "psycopg2_binary-2.9.2-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:b9d45374ba98c1184df9cce93a0b766097544f8bdfcd5de83ff10f939c193125"}, - {file = "psycopg2_binary-2.9.2-cp38-cp38-manylinux_2_24_ppc64le.whl", hash = "sha256:a1852c5bef7e5f52bd43fde5eda610d4df0fb2efc31028150933e84b4140d47a"}, - {file = "psycopg2_binary-2.9.2-cp38-cp38-win32.whl", hash = "sha256:108b0380969ddab7c8ef2a813a57f87b308b2f88ec15f1a1e7b653964a3cfb25"}, - {file = "psycopg2_binary-2.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:14427437117f38e65f71db65d8eafd0e86837be456567798712b8da89db2b2dd"}, - {file = "psycopg2_binary-2.9.2-cp39-cp39-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:578c279cd1ce04f05ae0912530ece00bab92854911808e5aec27588aba87e361"}, - {file = "psycopg2_binary-2.9.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c2dea4deac3dd3687e32daeb0712ee96c535970dfdded37a11de6a21145ab0e"}, - {file = "psycopg2_binary-2.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b592f09ff18cfcc9037b9a976fcd62db48cae9dbd5385f2471d4c2ba40c52b4d"}, - {file = "psycopg2_binary-2.9.2-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:3a320e7a804f3886a599fea507364aaafbb8387027fffcdfbd34d96316c806c7"}, - {file = "psycopg2_binary-2.9.2-cp39-cp39-manylinux_2_24_ppc64le.whl", hash = "sha256:7585ca73dcfe326f31fafa8f96e6bb98ea9e9e46c7a1924ec8101d797914ae27"}, - {file = "psycopg2_binary-2.9.2-cp39-cp39-win32.whl", hash = "sha256:9c0aaad07941419926b9bd00171e49fe6b06e42e5527fb91671e137fe6c93d77"}, - {file = "psycopg2_binary-2.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:aa2847d8073951dbc84c4f8b32c620764db3c2eb0d99a04835fecfab7d04816e"}, + {file = "psycopg2-binary-2.9.3.tar.gz", hash = "sha256:761df5313dc15da1502b21453642d7599d26be88bff659382f8f9747c7ebea4e"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:539b28661b71da7c0e428692438efbcd048ca21ea81af618d845e06ebfd29478"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e82d38390a03da28c7985b394ec3f56873174e2c88130e6966cb1c946508e65"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57804fc02ca3ce0dbfbef35c4b3a4a774da66d66ea20f4bda601294ad2ea6092"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:083a55275f09a62b8ca4902dd11f4b33075b743cf0d360419e2051a8a5d5ff76"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_24_ppc64le.whl", hash = "sha256:0a29729145aaaf1ad8bafe663131890e2111f13416b60e460dae0a96af5905c9"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a79d622f5206d695d7824cbf609a4f5b88ea6d6dab5f7c147fc6d333a8787e4"}, + {file = 
"psycopg2_binary-2.9.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:090f3348c0ab2cceb6dfbe6bf721ef61262ddf518cd6cc6ecc7d334996d64efa"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:a9e1f75f96ea388fbcef36c70640c4efbe4650658f3d6a2967b4cc70e907352e"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c3ae8e75eb7160851e59adc77b3a19a976e50622e44fd4fd47b8b18208189d42"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-win32.whl", hash = "sha256:7b1e9b80afca7b7a386ef087db614faebbf8839b7f4db5eb107d0f1a53225029"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:8b344adbb9a862de0c635f4f0425b7958bf5a4b927c8594e6e8d261775796d53"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:e847774f8ffd5b398a75bc1c18fbb56564cda3d629fe68fd81971fece2d3c67e"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:68641a34023d306be959101b345732360fc2ea4938982309b786f7be1b43a4a1"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3303f8807f342641851578ee7ed1f3efc9802d00a6f83c101d21c608cb864460"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-manylinux_2_24_aarch64.whl", hash = "sha256:e3699852e22aa68c10de06524a3721ade969abf382da95884e6a10ff798f9281"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-manylinux_2_24_ppc64le.whl", hash = "sha256:526ea0378246d9b080148f2d6681229f4b5964543c170dd10bf4faaab6e0d27f"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:b1c8068513f5b158cf7e29c43a77eb34b407db29aca749d3eb9293ee0d3103ca"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:15803fa813ea05bef089fa78835118b5434204f3a17cb9f1e5dbfd0b9deea5af"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:152f09f57417b831418304c7f30d727dc83a12761627bb826951692cc6491e57"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:404224e5fef3b193f892abdbf8961ce20e0b6642886cfe1fe1923f41aaa75c9d"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-win32.whl", hash = "sha256:1f6b813106a3abdf7b03640d36e24669234120c72e91d5cbaeb87c5f7c36c65b"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-win_amd64.whl", hash = "sha256:2d872e3c9d5d075a2e104540965a1cf898b52274a5923936e5bfddb58c59c7c2"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:10bb90fb4d523a2aa67773d4ff2b833ec00857f5912bafcfd5f5414e45280fb1"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:874a52ecab70af13e899f7847b3e074eeb16ebac5615665db33bce8a1009cf33"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a29b3ca4ec9defec6d42bf5feb36bb5817ba3c0230dd83b4edf4bf02684cd0ae"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:12b11322ea00ad8db8c46f18b7dfc47ae215e4df55b46c67a94b4effbaec7094"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_24_ppc64le.whl", hash = "sha256:53293533fcbb94c202b7c800a12c873cfe24599656b341f56e71dd2b557be063"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = 
"sha256:c381bda330ddf2fccbafab789d83ebc6c53db126e4383e73794c74eedce855ef"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9d29409b625a143649d03d0fd7b57e4b92e0ecad9726ba682244b73be91d2fdb"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:183a517a3a63503f70f808b58bfbf962f23d73b6dccddae5aa56152ef2bcb232"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:15c4e4cfa45f5a60599d9cec5f46cd7b1b29d86a6390ec23e8eebaae84e64554"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-win32.whl", hash = "sha256:adf20d9a67e0b6393eac162eb81fb10bc9130a80540f4df7e7355c2dd4af9fba"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:2f9ffd643bc7349eeb664eba8864d9e01f057880f510e4681ba40a6532f93c71"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:def68d7c21984b0f8218e8a15d514f714d96904265164f75f8d3a70f9c295667"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dffc08ca91c9ac09008870c9eb77b00a46b3378719584059c034b8945e26b272"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:280b0bb5cbfe8039205c7981cceb006156a675362a00fe29b16fbc264e242834"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:af9813db73395fb1fc211bac696faea4ca9ef53f32dc0cfa27e4e7cf766dcf24"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_24_ppc64le.whl", hash = "sha256:63638d875be8c2784cfc952c9ac34e2b50e43f9f0a0660b65e2a87d656b3116c"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ffb7a888a047696e7f8240d649b43fb3644f14f0ee229077e7f6b9f9081635bd"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0c9d5450c566c80c396b7402895c4369a410cab5a82707b11aee1e624da7d004"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:d1c1b569ecafe3a69380a94e6ae09a4789bbb23666f3d3a08d06bbd2451f5ef1"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8fc53f9af09426a61db9ba357865c77f26076d48669f2e1bb24d85a22fb52307"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-win32.whl", hash = "sha256:6472a178e291b59e7f16ab49ec8b4f3bdada0a879c68d3817ff0963e722a82ce"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:35168209c9d51b145e459e05c31a9eaeffa9a6b0fd61689b48e07464ffd1a83e"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:47133f3f872faf28c1e87d4357220e809dfd3fa7c64295a4a148bcd1e6e34ec9"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91920527dea30175cc02a1099f331aa8c1ba39bf8b7762b7b56cbf54bc5cce42"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:887dd9aac71765ac0d0bac1d0d4b4f2c99d5f5c1382d8b770404f0f3d0ce8a39"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:1f14c8b0942714eb3c74e1e71700cbbcb415acbc311c730370e70c578a44a25c"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_24_ppc64le.whl", hash = "sha256:7af0dd86ddb2f8af5da57a976d27cd2cd15510518d582b478fbb2292428710b4"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash 
= "sha256:93cd1967a18aa0edd4b95b1dfd554cf15af657cb606280996d393dadc88c3c35"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bda845b664bb6c91446ca9609fc69f7db6c334ec5e4adc87571c34e4f47b7ddb"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:01310cf4cf26db9aea5158c217caa92d291f0500051a6469ac52166e1a16f5b7"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:99485cab9ba0fa9b84f1f9e1fef106f44a46ef6afdeec8885e0b88d0772b49e8"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-win32.whl", hash = "sha256:46f0e0a6b5fa5851bbd9ab1bc805eef362d3a230fbdfbc209f4a236d0a7a990d"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:accfe7e982411da3178ec690baaceaad3c278652998b2c45828aaac66cd8285f"}, ] py = [ {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, @@ -4881,8 +4911,8 @@ pytest-cov = [ {file = "pytest_cov-2.12.1-py2.py3-none-any.whl", hash = "sha256:261bb9e47e65bd099c89c3edf92972865210c36813f80ede5277dceb77a4a62a"}, ] pytest-forked = [ - {file = "pytest-forked-1.3.0.tar.gz", hash = "sha256:6aa9ac7e00ad1a539c41bec6d21011332de671e938c7637378ec9710204e37ca"}, - {file = "pytest_forked-1.3.0-py2.py3-none-any.whl", hash = "sha256:dc4147784048e70ef5d437951728825a131b81714b398d5d52f17c7c144d8815"}, + {file = "pytest-forked-1.4.0.tar.gz", hash = "sha256:8b67587c8f98cbbadfdd804539ed5455b6ed03802203485dd2f53c1422d7440e"}, + {file = "pytest_forked-1.4.0-py3-none-any.whl", hash = "sha256:bbbb6717efc886b9d64537b41fb1497cfaf3c9601276be8da2cccfea5a3c8ad8"}, ] pytest-sanic = [] pytest-timeout = [ @@ -4890,8 +4920,8 @@ pytest-timeout = [ {file = "pytest_timeout-1.4.2-py2.py3-none-any.whl", hash = "sha256:541d7aa19b9a6b4e475c759fd6073ef43d7cdc9a92d95644c260076eb257a063"}, ] pytest-xdist = [ - {file = "pytest-xdist-2.4.0.tar.gz", hash = "sha256:89b330316f7fc475f999c81b577c2b926c9569f3d397ae432c0c2e2496d61ff9"}, - {file = "pytest_xdist-2.4.0-py3-none-any.whl", hash = "sha256:7b61ebb46997a0820a263553179d6d1e25a8c50d8a8620cd1aa1e20e3be99168"}, + {file = "pytest-xdist-2.5.0.tar.gz", hash = "sha256:4580deca3ff04ddb2ac53eba39d76cb5dd5edeac050cb6fbc768b0dd712b4edf"}, + {file = "pytest_xdist-2.5.0-py3-none-any.whl", hash = "sha256:6fe5c74fec98906deb8f2d2b616b5c782022744978e7bd4695d39c8f42d0ce65"}, ] python-crfsuite = [ {file = "python-crfsuite-0.9.7.tar.gz", hash = "sha256:3b4538d2ce5007e4e42005818247bf43ade89ef08a66d158462e2f7c5d63cee7"}, @@ -4931,12 +4961,12 @@ python-dateutil = [ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, ] python-engineio = [ - {file = "python-engineio-4.3.0.tar.gz", hash = "sha256:fed35eeacfa21f53f1fc05ef0cadd65a50780364da3a2be7650eb92f928fdb11"}, - {file = "python_engineio-4.3.0-py3-none-any.whl", hash = "sha256:ad06a975f7e14cb3bb7137cbf70fd883804484d29acd58004d1db1e2a7fc0ad3"}, + {file = "python-engineio-4.3.1.tar.gz", hash = "sha256:6e1d26977ffefe3b7da1b5df7a8750aedc7686da8201cd90daf36693db122489"}, + {file = "python_engineio-4.3.1-py3-none-any.whl", hash = "sha256:85986067cb9f7695347954d4e03491f7d45152c5428c07109a9707e04e8942cb"}, ] python-socketio = [ - {file = "python-socketio-5.5.0.tar.gz", hash = "sha256:ce972ea1b82aa1811fa10d30cf0d5c251b9a1558c3d66829b6fe70854bcccf0b"}, - {file = "python_socketio-5.5.0-py3-none-any.whl", hash = "sha256:ca28a0ff0ca5dd05ec5ba4ee2572fe06b96d6f0bc7df384d8b50fbbc06986134"}, + 
{file = "python-socketio-5.5.1.tar.gz", hash = "sha256:ac8e64d59a15d1c31a4fe8434f4ff16d0f640c824ba517dce7ca99e95f0cd36a"}, + {file = "python_socketio-5.5.1-py3-none-any.whl", hash = "sha256:d0b98474064ac239a618649ca67f5288827705d36bd5f7615a473b37965baf61"}, ] pytz = [ {file = "pytz-2021.3-py2.py3-none-any.whl", hash = "sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c"}, @@ -5092,12 +5122,11 @@ s3transfer = [ {file = "s3transfer-0.5.0.tar.gz", hash = "sha256:50ed823e1dc5868ad40c8dc92072f757aa0e653a192845c94a3b676f4a62da4c"}, ] sacremoses = [ - {file = "sacremoses-0.0.46-py3-none-any.whl", hash = "sha256:f95f80d09d3501fed5c1d3056d9212b40599b08cb27f185d38ff0063be8ddd09"}, - {file = "sacremoses-0.0.46.tar.gz", hash = "sha256:4b1fe8813915c7a2647728a46ebbf0d1a65eabb7ca05ba10efeb0548547eea38"}, + {file = "sacremoses-0.0.47-py2.py3-none-any.whl", hash = "sha256:7622c6e9fe12d45b7acf4528451bd054c1557c1f6779398f9cd9f28332d92a0b"}, ] sanic = [ - {file = "sanic-21.9.3-py3-none-any.whl", hash = "sha256:354fbc9e5382df23989752067a57d042aef99bf5a8e85593a3e0112276bff9e8"}, - {file = "sanic-21.9.3.tar.gz", hash = "sha256:5edb41d0d30cf47a25cf991b7465f53ea161b43e3cd20d8985e68ecf7fb7ad24"}, + {file = "sanic-21.12.1-py3-none-any.whl", hash = "sha256:53230ba1a1081e6b075e58339a47c07d2bf618119fa40ba88c69fe57c99126f1"}, + {file = "sanic-21.12.1.tar.gz", hash = "sha256:6f15ecfef47d4288aac04d8741833a7701cd94f2d4a28fc5667ce2844363152d"}, ] sanic-cors = [ {file = "Sanic-Cors-1.0.1.tar.gz", hash = "sha256:ac5d40bd45022d21296887d2238891d0ad67308ffba55be809df0151d36071b5"}, @@ -5155,9 +5184,6 @@ scikit-learn = [ {file = "scikit_learn-0.24.2-cp39-cp39-win_amd64.whl", hash = "sha256:40556bea1ef26ef54bc678d00cf138a63069144a0b5f3a436eecd8f3468b903e"}, ] scipy = [ - {file = "scipy-1.7.3-1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:c9e04d7e9b03a8a6ac2045f7c5ef741be86727d8f49c45db45f244bdd2bcff17"}, - {file = "scipy-1.7.3-1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:b0e0aeb061a1d7dcd2ed59ea57ee56c9b23dd60100825f98238c06ee5cc4467e"}, - {file = "scipy-1.7.3-1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:b78a35c5c74d336f42f44106174b9851c783184a85a3fe3e68857259b37b9ffb"}, {file = "scipy-1.7.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:173308efba2270dcd61cd45a30dfded6ec0085b4b6eb33b5eb11ab443005e088"}, {file = "scipy-1.7.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:21b66200cf44b1c3e86495e3a436fc7a26608f92b8d43d344457c54f1c024cbc"}, {file = "scipy-1.7.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceebc3c4f6a109777c0053dfa0282fddb8893eddfb0d598574acfb734a926168"}, @@ -5269,22 +5295,22 @@ sortedcontainers = [ {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"}, ] spacy = [ - {file = "spacy-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:eec8e66278fe50a1c0e488c05a583b8a755371999319e002ad08da2ba079b1f5"}, - {file = "spacy-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:208ea0ccc8015c5d4f3d11de627f6adec36058b508756bcdc9372d9ca8e1161d"}, - {file = "spacy-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:9a7c42a548b09d4c280283b33239a89c130f1662bb32b10270438a44dd3fb4bf"}, - {file = "spacy-3.2.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:729cd9bb2fbb04b43e5cffc6f344cc3451e521d78689afc66df4d1cf18e9393a"}, - {file = "spacy-3.2.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:2cef9524d1516c143a586be61888b4851dad893e4d1f609050eacb3aa3cc84fb"}, - {file = "spacy-3.2.0-cp36-cp36m-win_amd64.whl", hash = "sha256:db79c01405a54bcc832a5308fbe1f060e004f13ea6a86d483a265d4632b90e72"}, - {file = "spacy-3.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0750590697ea4588b41cdb8db66baed37d46412f8f385572edb3756106005423"}, - {file = "spacy-3.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf2fddb68f2465e98a2ca756861f28a41e13452d7e3a0cd3286396267401d4f5"}, - {file = "spacy-3.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:5cb3ef623511d58759b87fbece2bf6f19d46ce594720c05230185fff6e65f28f"}, - {file = "spacy-3.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f908937ff8066d73162ea4649f644c312df8a8c2bff0219b98c75fa57054bcbc"}, - {file = "spacy-3.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ef56f77c47389451a9bfa56cc79ac9bd17385fc7ed9e1c08c0cf1ad41dd19a1"}, - {file = "spacy-3.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:a3956bce38773a685380b79aa89420269309332280588ca012634ed8831724aa"}, - {file = "spacy-3.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:19eb4f24791c53e9bf7ef5b4112dfe6b2bea9dfc859269344b437e37de2e4654"}, - {file = "spacy-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c5231003ad62e621da080a79eabdddcc38a11964d79c2b70f7a38bcb4b47aed"}, - {file = "spacy-3.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:a5746672265470300c099a2d2ea2442ba0b84870106d21c69568561db38bba1d"}, - {file = "spacy-3.2.0.tar.gz", hash = "sha256:68e54b2a14ce74eeecea9bfb0b9bdadf8a4a8157765dbefa7e50d25a1bf0f2f3"}, + {file = "spacy-3.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a88d8f3b8849e3097e9e077e752dafcad8f5e557c9aba7099a83891e2153d141"}, + {file = "spacy-3.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91ceedc85846d3e36c0c95142ec2e675bc725c9d0035c0cdb21b40261d047516"}, + {file = "spacy-3.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:cb4ac55c27741a35e1af6f9718d1562d4000256a5673b0a2474b57998cc71825"}, + {file = "spacy-3.2.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:62b822ba097a2f2b61161306a7a9957ccc79dfed6a74ae7eb7572b3c3c153c03"}, + {file = "spacy-3.2.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00d177d58f49df8199bff58ed2bd37b25b1810f53bde41279030c506d8b3ac72"}, + {file = "spacy-3.2.1-cp36-cp36m-win_amd64.whl", hash = "sha256:868abe6d5f8ace6059ca4f4eeb6628c18ca40f9446b6fec50e86c42ac234c20d"}, + {file = "spacy-3.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a8edc17de654f676e183173cf54325d22a13a832bd15d77c75a561578930123d"}, + {file = "spacy-3.2.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaf6c35bf6d80dd2d001ff4ff2f6d73e676eec36fe3da4d006c14fe2f96a80f8"}, + {file = "spacy-3.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:cad15271b336c006ed6e4422b02283362cf92fc117886bccfb2d97291fd0c5e4"}, + {file = "spacy-3.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:88d8440ec629df87ab3cc645e292eb9046c03a5b3255141401d367e494d5a51e"}, + {file = "spacy-3.2.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c8ca5b80e7b0ea3d485a44573d28a18173bf6b808661e4d740d02a985c3bf0d"}, + {file = "spacy-3.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:84b97d86d4a9afbb0b6b7623061ae79ac607460d80aa20eee477e33faf019a67"}, + {file = "spacy-3.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e9f2a5049843a92f4020452c82b697921d26d95560fd185e2d0336cb2607a5df"}, + {file 
= "spacy-3.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d12b3e6568b7fde01b62ed42d0d578779774d091a33764d0f29def9b75363f81"}, + {file = "spacy-3.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:a9df95ba949774ce6d34a4dacc766068bd812b1081a99375aa7db63df04527dc"}, + {file = "spacy-3.2.1.tar.gz", hash = "sha256:f6ebac511627740a8ca2b117b91ef5515c8f0b2fb117a69ebe01d010dd4fc53c"}, ] spacy-legacy = [ {file = "spacy-legacy-3.0.8.tar.gz", hash = "sha256:b4725c5c161f0685ab4fce3fc912bc68aefdb7e102ba9848e852bb5842256c2f"}, @@ -5295,45 +5321,45 @@ spacy-loggers = [ {file = "spacy_loggers-1.0.1-py3-none-any.whl", hash = "sha256:5e610c980efb831fa428c24fd659e5dd850ea6140c9ed987efe0e8d26df3ee7c"}, ] sqlalchemy = [ - {file = "SQLAlchemy-1.4.27-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:6afa9e4e63f066e0fd90a21db7e95e988d96127f52bfb298a0e9bec6999357a9"}, - {file = "SQLAlchemy-1.4.27-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:ec1c908fa721f2c5684900cc8ff75555b1a5a2ae4f5a5694eb0e37a5263cea44"}, - {file = "SQLAlchemy-1.4.27-cp27-cp27m-win32.whl", hash = "sha256:0438bccc16349db2d5203598be6073175ce16d4e53b592d6e6cef880c197333e"}, - {file = "SQLAlchemy-1.4.27-cp27-cp27m-win_amd64.whl", hash = "sha256:435b1980c1333ffe3ab386ad28d7b209590b0fa83ea8544d853e7a22f957331b"}, - {file = "SQLAlchemy-1.4.27-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:486f7916ef77213103467924ef25f5ea1055ae901f385fe4d707604095fdf6a9"}, - {file = "SQLAlchemy-1.4.27-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:d81c84c9d2523b3ea20f8e3aceea68615768a7464c0f9a9899600ce6592ec570"}, - {file = "SQLAlchemy-1.4.27-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5881644fc51af7b232ab8d64f75c0f32295dfe88c2ee188023795cdbd4cf99b"}, - {file = "SQLAlchemy-1.4.27-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:24828c5e74882cf41516740c0b150702bee4c6817d87d5c3d3bafef2e6896f80"}, - {file = "SQLAlchemy-1.4.27-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7d0a1b1258efff7d7f2e6cfa56df580d09ba29d35a1e3f604f867e1f685feb2"}, - {file = "SQLAlchemy-1.4.27-cp310-cp310-win32.whl", hash = "sha256:aadc6d1e58e14010ae4764d1ba1fd0928dbb9423b27a382ea3a1444f903f4084"}, - {file = "SQLAlchemy-1.4.27-cp310-cp310-win_amd64.whl", hash = "sha256:9134e5810262203388b203c2022bbcbf1a22e89861eef9340e772a73dd9076fa"}, - {file = "SQLAlchemy-1.4.27-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:fa52534076394af7315306a8701b726a6521b591d95e8f4e5121c82f94790e8d"}, - {file = "SQLAlchemy-1.4.27-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2717ceae35e71de1f58b0d1ee7e773d3aab5c403c6e79e8d262277c7f7f95269"}, - {file = "SQLAlchemy-1.4.27-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2e93624d186ea7a738ada47314701c8830e0e4b021a6bce7fbe6f39b87ee1516"}, - {file = "SQLAlchemy-1.4.27-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:987fe2f84ceaf744fa0e48805152abe485a9d7002c9923b18a4b2529c7bff218"}, - {file = "SQLAlchemy-1.4.27-cp36-cp36m-win32.whl", hash = "sha256:2146ef996181e3d4dd20eaf1d7325eb62d6c8aa4dc1677c1872ddfa8561a47d9"}, - {file = "SQLAlchemy-1.4.27-cp36-cp36m-win_amd64.whl", hash = "sha256:ad8ec6b69d03e395db48df8991aa15fce3cd23e378b73e01d46a26a6efd5c26d"}, - {file = 
"SQLAlchemy-1.4.27-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:52f23a76544ed29573c0f3ee41f0ca1aedbab3a453102b60b540cc6fa55448ad"}, - {file = "SQLAlchemy-1.4.27-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd421a14edf73cfe01e8f51ed8966294ee3b3db8da921cacc88e497fd6e977af"}, - {file = "SQLAlchemy-1.4.27-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:10230364479429437f1b819a8839f1edc5744c018bfeb8d01320930f97695bc9"}, - {file = "SQLAlchemy-1.4.27-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78943451ab3ffd0e27876f9cea2b883317518b418f06b90dadf19394534637e9"}, - {file = "SQLAlchemy-1.4.27-cp37-cp37m-win32.whl", hash = "sha256:a81e40dfa50ed3c472494adadba097640bfcf43db160ed783132045eb2093cb1"}, - {file = "SQLAlchemy-1.4.27-cp37-cp37m-win_amd64.whl", hash = "sha256:015511c52c650eebf1059ed8a21674d9d4ae567ebfd80fc73f8252faccd71864"}, - {file = "SQLAlchemy-1.4.27-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:cc49fb8ff103900c20e4a9c53766c82a7ebbc183377fb357a8298bad216e9cdd"}, - {file = "SQLAlchemy-1.4.27-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9369f927f4d19b58322cfea8a51710a3f7c47a0e7f3398d94a4632760ecd74f6"}, - {file = "SQLAlchemy-1.4.27-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6510f4a5029643301bdfe56b61e806093af2101d347d485c42a5535847d2c699"}, - {file = "SQLAlchemy-1.4.27-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:771eca9872b47a629010665ff92de1c248a6979b8d1603daced37773d6f6e365"}, - {file = "SQLAlchemy-1.4.27-cp38-cp38-win32.whl", hash = "sha256:4d1d707b752137e6bf45720648e1b828d5e4881d690df79cca07f7217ea06365"}, - {file = "SQLAlchemy-1.4.27-cp38-cp38-win_amd64.whl", hash = "sha256:c035184af4e58e154b0977eea52131edd096e0754a88f7d5a847e7ccb3510772"}, - {file = "SQLAlchemy-1.4.27-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:bac949be7579fed824887eed6672f44b7c4318abbfb2004b2c6968818b535a2f"}, - {file = "SQLAlchemy-1.4.27-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ac8306e04275d382d6393e557047b0a9d7ddf9f7ca5da9b3edbd9323ea75bd9"}, - {file = "SQLAlchemy-1.4.27-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8327e468b1775c0dfabc3d01f39f440585bf4d398508fcbbe2f0d931c502337d"}, - {file = "SQLAlchemy-1.4.27-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b02eee1577976acb4053f83d32b7826424f8b9f70809fa756529a52c6537eda4"}, - {file = "SQLAlchemy-1.4.27-cp39-cp39-win32.whl", hash = "sha256:5beeff18b4e894f6cb73c8daf2c0d8768844ef40d97032bb187d75b1ec8de24b"}, - {file = "SQLAlchemy-1.4.27-cp39-cp39-win_amd64.whl", hash = "sha256:8dbe5f639e6d035778ebf700be6d573f82a13662c3c2c3aa0f1dba303b942806"}, - {file = "SQLAlchemy-1.4.27.tar.gz", hash = "sha256:d768359daeb3a86644f3854c6659e4496a3e6bba2b4651ecc87ce7ad415b320c"}, + {file = "SQLAlchemy-1.4.29-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:da64423c05256f4ab8c0058b90202053b201cbe3a081f3a43eb590cd554395ab"}, + {file = "SQLAlchemy-1.4.29-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:0fc4eec2f46b40bdd42112b3be3fbbf88e194bcf02950fbb88bcdc1b32f07dc7"}, + {file = "SQLAlchemy-1.4.29-cp27-cp27m-win32.whl", hash = 
"sha256:101d2e100ba9182c9039699588e0b2d833c54b3bad46c67c192159876c9f27ea"}, + {file = "SQLAlchemy-1.4.29-cp27-cp27m-win_amd64.whl", hash = "sha256:ceac84dd9abbbe115e8be0c817bed85d9fa639b4d294e7817f9e61162d5f766c"}, + {file = "SQLAlchemy-1.4.29-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:15b65887b6c324cad638c7671cb95985817b733242a7eb69edd7cdf6953be1e0"}, + {file = "SQLAlchemy-1.4.29-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:78abc507d17753ed434b6cc0c0693126279723d5656d9775bfcac966a99a899b"}, + {file = "SQLAlchemy-1.4.29-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb8c993706e86178ce15a6b86a335a2064f52254b640e7f53365e716423d33f4"}, + {file = "SQLAlchemy-1.4.29-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:804e22d5b6165a4f3f019dd9c94bec5687de985a9c54286b93ded9f7846b8c82"}, + {file = "SQLAlchemy-1.4.29-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56d9d62021946263d4478c9ca012fbd1805f10994cb615c88e7bfd1ae14604d8"}, + {file = "SQLAlchemy-1.4.29-cp310-cp310-win32.whl", hash = "sha256:027f356c727db24f3c75828c7feb426f87ce1241242d08958e454bd025810660"}, + {file = "SQLAlchemy-1.4.29-cp310-cp310-win_amd64.whl", hash = "sha256:debaf09a823061f88a8dee04949814cf7e82fb394c5bca22c780cb03172ca23b"}, + {file = "SQLAlchemy-1.4.29-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:dc27dcc6c72eb38be7f144e9c2c4372d35a3684d3a6dd43bd98c1238358ee17c"}, + {file = "SQLAlchemy-1.4.29-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4ddd4f2e247128c58bb3dd4489922874afce157d2cff0b2295d67fcd0f22494"}, + {file = "SQLAlchemy-1.4.29-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9ce960a1dc60524136cf6f75621588e2508a117e04a6e3eedb0968bd13b8c824"}, + {file = "SQLAlchemy-1.4.29-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5919e647e1d4805867ea556ed4967c68b4d8b266059fa35020dbaed8ffdd60f3"}, + {file = "SQLAlchemy-1.4.29-cp36-cp36m-win32.whl", hash = "sha256:886359f734b95ad1ef443b13bb4518bcade4db4f9553c9ce33d6d04ebda8d44e"}, + {file = "SQLAlchemy-1.4.29-cp36-cp36m-win_amd64.whl", hash = "sha256:e9cc6d844e24c307c3272677982a9b33816aeb45e4977791c3bdd47637a8d810"}, + {file = "SQLAlchemy-1.4.29-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:5e9cd33459afa69c88fa648e803d1f1245e3caa60bfe8b80a9595e5edd3bda9c"}, + {file = "SQLAlchemy-1.4.29-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eeaebceb24b46e884c4ad3c04f37feb178b81f6ce720af19bfa2592ca32fdef7"}, + {file = "SQLAlchemy-1.4.29-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e89347d3bd2ef873832b47e85f4bbd810a5e626c5e749d90a07638da100eb1c8"}, + {file = "SQLAlchemy-1.4.29-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a717c2e70fd1bb477161c4cc85258e41d978584fbe5522613618195f7e87d9b"}, + {file = "SQLAlchemy-1.4.29-cp37-cp37m-win32.whl", hash = "sha256:f74d6c05d2d163464adbdfbc1ab85048cc15462ff7d134b8aed22bd521e1faa5"}, + {file = "SQLAlchemy-1.4.29-cp37-cp37m-win_amd64.whl", hash = "sha256:621854dbb4d2413c759a5571564170de45ef37299df52e78e62b42e2880192e1"}, + {file = "SQLAlchemy-1.4.29-cp38-cp38-macosx_10_14_x86_64.whl", hash = 
"sha256:f3909194751bb6cb7c5511dd18bcf77e6e3f0b31604ed4004dffa9461f71e737"}, + {file = "SQLAlchemy-1.4.29-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd49d21d1f03c81fbec9080ecdc4486d5ddda67e7fbb75ebf48294465c022cdc"}, + {file = "SQLAlchemy-1.4.29-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e5f6959466a42b6569774c257e55f9cd85200d5b0ba09f0f5d8b5845349c5822"}, + {file = "SQLAlchemy-1.4.29-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0072f9887aabe66db23f818bbe950cfa1b6127c5cb769b00bcc07935b3adb0ad"}, + {file = "SQLAlchemy-1.4.29-cp38-cp38-win32.whl", hash = "sha256:ad618d687d26d4cbfa9c6fa6141d59e05bcdfc60cb6e1f1d3baa18d8c62fef5f"}, + {file = "SQLAlchemy-1.4.29-cp38-cp38-win_amd64.whl", hash = "sha256:878daecb6405e786b07f97e1c77a9cfbbbec17432e8a90c487967e32cfdecb33"}, + {file = "SQLAlchemy-1.4.29-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:e027bdf0a4cf6bd0a3ad3b998643ea374d7991bd117b90bf9982e41ceb742941"}, + {file = "SQLAlchemy-1.4.29-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5de7adfb91d351f44062b8dedf29f49d4af7cb765be65816e79223a4e31062b"}, + {file = "SQLAlchemy-1.4.29-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fbc6e63e481fa323036f305ada96a3362e1d60dd2bfa026cac10c3553e6880e9"}, + {file = "SQLAlchemy-1.4.29-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7dd0502cb091660ad0d89c5e95a29825f37cde2a5249957838e975871fbffaad"}, + {file = "SQLAlchemy-1.4.29-cp39-cp39-win32.whl", hash = "sha256:37b46bfc4af3dc226acb6fa28ecd2e1fd223433dc5e15a2bad62bf0a0cbb4e8b"}, + {file = "SQLAlchemy-1.4.29-cp39-cp39-win_amd64.whl", hash = "sha256:08cfd35eecaba79be930c9bfd2e1f0c67a7e1314355d83a378f9a512b1cf7587"}, + {file = "SQLAlchemy-1.4.29.tar.gz", hash = "sha256:fa2bad14e1474ba649cfc969c1d2ec915dd3e79677f346bbfe08e93ef9020b39"}, ] srsly = [ - {file = "srsly-2.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:834229df7377386e9990fd245e1ae12a72152997fd159a782a798b638721a7b2"}, + {file = "srsly-2.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5e22bbc1a20abf749fa53adf101c36bc369ec63f496c7a44bf4f5f287d724900"}, {file = "srsly-2.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:004d29a5abc0fe632434359c0be170490a69c4dce2c3de8a769944c37da7bb4b"}, {file = "srsly-2.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7ced7ec4993b4d4ad73cc442f8f7a518368348054d510864b1aa149e8d71654d"}, {file = "srsly-2.4.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:801c7e6e32c6a4721ab78ab7dafd01074fdb144f4876c09b25305c98f95c470f"}, @@ -5371,7 +5397,7 @@ tensorboard-data-server = [ {file = "tensorboard_data_server-0.6.1-py3-none-manylinux2010_x86_64.whl", hash = "sha256:d8237580755e58eff68d1f3abefb5b1e39ae5c8b127cc40920f9c4fb33f4b98a"}, ] tensorboard-plugin-wit = [ - {file = "tensorboard_plugin_wit-1.8.0-py3-none-any.whl", hash = "sha256:2a80d1c551d741e99b2f197bb915d8a133e24adb8da1732b840041860f91183a"}, + {file = "tensorboard_plugin_wit-1.8.1-py3-none-any.whl", hash = "sha256:ff26bdd583d155aa951ee3b152b3d0cffae8005dc697f72b44a8e8c2a77a8cbe"}, ] tensorflow = [ {file = "tensorflow-2.6.1-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:dd0036a6fdb78958dcab3bdc4b3df23d7b9ab80ca51c03f2a84b4d354bbf36a1"}, @@ -5428,10 +5454,11 @@ termcolor = [ {file = 
"termcolor-1.1.0.tar.gz", hash = "sha256:1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b"}, ] terminaltables = [ - {file = "terminaltables-3.1.0.tar.gz", hash = "sha256:f3eb0eb92e3833972ac36796293ca0906e998dc3be91fbe1f8615b331b853b81"}, + {file = "terminaltables-3.1.10-py2.py3-none-any.whl", hash = "sha256:e4fdc4179c9e4aab5f674d80f09d76fa436b96fdc698a8505e0a36bf0804a874"}, + {file = "terminaltables-3.1.10.tar.gz", hash = "sha256:ba6eca5cb5ba02bba4c9f4f985af80c54ec3dccf94cfcd190154386255e47543"}, ] thinc = [ - {file = "thinc-8.0.13-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ad8794a76725b85847528fd1a56471d5ac00f4104da8efb065ba572238e381a2"}, + {file = "thinc-8.0.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f818b9f012169a11beb3561c43dc52080588e50cf495733e492efab8b9b4135e"}, {file = "thinc-8.0.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f520daf45b7f42a04363852df43be1b423ae42d9327709d74f6c3279b3f73778"}, {file = "thinc-8.0.13-cp310-cp310-win_amd64.whl", hash = "sha256:2b217059c9e126220b77e7d6c9da56912c4e1eb4e8a11af14f17752e198e88cc"}, {file = "thinc-8.0.13-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0f956c693d180209075703072fd226a24408cbe80eb67bd3b6eea407f61cb283"}, @@ -5527,8 +5554,8 @@ typed-ast = [ {file = "typed_ast-1.4.3.tar.gz", hash = "sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65"}, ] typeguard = [ - {file = "typeguard-2.13.2-py3-none-any.whl", hash = "sha256:4f7da3d80dda5e42d6973f11f33da3542b8bf86edc12ba926b2dbad62adf3fcf"}, - {file = "typeguard-2.13.2.tar.gz", hash = "sha256:7e50071590ab997509aa0977609eb5cf9d73d84c1f416cb4fab78b77a9d15326"}, + {file = "typeguard-2.13.3-py3-none-any.whl", hash = "sha256:5e3e3be01e887e7eafae5af63d1f36c849aaa94e3a0112097312aabfa16284f1"}, + {file = "typeguard-2.13.3.tar.gz", hash = "sha256:00edaa8da3a133674796cf5ea87d9f4b4c367d77476e185e80251cc13dfbb8c4"}, ] typer = [ {file = "typer-0.4.0-py3-none-any.whl", hash = "sha256:d81169725140423d072df464cad1ff25ee154ef381aaf5b8225352ea187ca338"}, @@ -5543,16 +5570,20 @@ types-python-dateutil = [ {file = "types_python_dateutil-0.1.6-py3-none-any.whl", hash = "sha256:5b6241ea9fca2d8878cc152017d9524da62a7a856b98e31006e68b02aab47442"}, ] types-pytz = [ - {file = "types-pytz-2021.3.1.tar.gz", hash = "sha256:dffd77f3efecd3b1555f187a9bf3a638d55fac296700b829c41bd51ec72a6eb7"}, - {file = "types_pytz-2021.3.1-py3-none-any.whl", hash = "sha256:d58a0688094b768d8e21c044e45861cbcaecba0494fd5b9c5feb3e1739211606"}, + {file = "types-pytz-2021.3.4.tar.gz", hash = "sha256:101da53091013bb07403468c20d36930d749d3918054ac46f9c1bfc607dadf7d"}, + {file = "types_pytz-2021.3.4-py3-none-any.whl", hash = "sha256:ccfa2ed29f816e3de2f882541c06ad2791f808a79cfe38265411820190999f0f"}, ] types-requests = [ - {file = "types-requests-2.26.1.tar.gz", hash = "sha256:0893e112e1510bbb67f537941c92192de7472e51bf7f236e0e583866f0ed933e"}, - {file = "types_requests-2.26.1-py3-none-any.whl", hash = "sha256:853571b3accc188976c0f4feffcaebf6cdfc170082b5e43f3358aa78de61f531"}, + {file = "types-requests-2.27.5.tar.gz", hash = "sha256:a67dc1a8512312b8cb89f3ba95f9a0e69ef3436ae77c9bd4f328cd88f17adda2"}, + {file = "types_requests-2.27.5-py3-none-any.whl", hash = "sha256:9c9390b18b222956155af6678570f452edafa3bb94bd5a4efe67da1105aa128c"}, ] types-setuptools = [ - {file = "types-setuptools-57.4.4.tar.gz", hash = "sha256:a3cbcbf3f02142bb5d3b5c5f5918f453b8752362b96d58aba2a5cfa43ba6d209"}, - {file = "types_setuptools-57.4.4-py3-none-any.whl", hash = 
"sha256:9fe4180548e5cbb44cc0d343a47e4dc1d6769c31e16447994788c28df169011f"}, + {file = "types-setuptools-57.4.7.tar.gz", hash = "sha256:9677d969b00ec1c14552f5be2b2b47a6fbea4d0ed4de0fdcee18abdaa0cc9267"}, + {file = "types_setuptools-57.4.7-py3-none-any.whl", hash = "sha256:ffda504687ea02d4b7751c0d1df517fbbcdc276836d90849e4f1a5f1ccd79f01"}, +] +types-urllib3 = [ + {file = "types-urllib3-1.26.4.tar.gz", hash = "sha256:35c17be09e1bcef3b1e2101c5172b97cdb78330915973c741f4c1979d8405ef9"}, + {file = "types_urllib3-1.26.4-py3-none-any.whl", hash = "sha256:61a0099899b1472bfbae8ee6ad196950aac46705b99216188aa962ad1e514ed5"}, ] typing-extensions = [ {file = "typing_extensions-3.7.4.3-py2-none-any.whl", hash = "sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f"}, @@ -5618,8 +5649,8 @@ uritemplate = [ {file = "uritemplate-4.1.1.tar.gz", hash = "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0"}, ] urllib3 = [ - {file = "urllib3-1.26.7-py2.py3-none-any.whl", hash = "sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844"}, - {file = "urllib3-1.26.7.tar.gz", hash = "sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece"}, + {file = "urllib3-1.26.8-py2.py3-none-any.whl", hash = "sha256:000ca7f471a233c2251c6c7023ee85305721bfdf18621ebff4fd17a8653427ed"}, + {file = "urllib3-1.26.8.tar.gz", hash = "sha256:0e7c33d9a63e7ddfcb86780aac87befc2fbddf46c58dbb487e0855f7ceec283c"}, ] uvloop = [ {file = "uvloop-0.14.0-cp35-cp35m-macosx_10_11_x86_64.whl", hash = "sha256:08b109f0213af392150e2fe6f81d33261bb5ce968a288eb698aad4f46eb711bd"}, @@ -5799,6 +5830,6 @@ yarl = [ {file = "yarl-1.7.2.tar.gz", hash = "sha256:45399b46d60c253327a460e99856752009fcee5f5d3c80b2f7c0cae1c38d56dd"}, ] zipp = [ - {file = "zipp-3.6.0-py3-none-any.whl", hash = "sha256:9fe5ea21568a0a70e50f273397638d39b03353731e6cbbb3fd8502a33fec40bc"}, - {file = "zipp-3.6.0.tar.gz", hash = "sha256:71c644c5369f4a6e07636f0aa966270449561fcea2e3d6747b8d23efaa9d7832"}, + {file = "zipp-3.7.0-py3-none-any.whl", hash = "sha256:b47250dd24f92b7dd6a0a8fc5244da14608f3ca90a5efcd37a3b1642fac9a375"}, + {file = "zipp-3.7.0.tar.gz", hash = "sha256:9f50f446828eb9d45b267433fd3e9da8d801f614129124863f9c51ebceafb87d"}, ] diff --git a/pyproject.toml b/pyproject.toml index 40c4ba393819..637fe6c8af46 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -93,7 +93,7 @@ pytz = ">=2019.1,<2022.0" rasa-sdk = "^3.0.0" colorclass = "~2.2" terminaltables = "~3.1.0" -sanic = "^21.6.0" +sanic = "^21.9.3" sanic-cors = "^1.0.0" sanic-jwt = "^1.6.0" cloudpickle = ">=1.2,<1.7" @@ -132,10 +132,10 @@ keras = "<2.7.0" sanic-routing = "^0.7.2" [tool.poetry.dev-dependencies] -pytest-cov = "^2.10.0" +pytest-cov = "^2.12.1" pytest-asyncio = "^0.14.0" pytest-xdist = "^2.2.1" -pytest = "^6.2.2" +pytest = "^6.2.5" freezegun = "^1.0.0" responses = "^0.13.3" aioresponses = "^0.7.2" @@ -143,13 +143,13 @@ moto = "~=2.2.6" fakeredis = "^1.5.2" mongomock = "^3.18.0" black = "^19.10b0" -flake8 = "^3.8.3" -flake8-docstrings = "^1.5.0" +flake8 = "^3.9.2" +flake8-docstrings = "^1.6.0" google-cloud-storage = "^1.40.0" azure-storage-blob = "<12.9.0" -coveralls = "^3.0.1" +coveralls = "^3.1.0" towncrier = "^21.3.0" -toml = "^0.10.0" +toml = "^0.10.2" pep440-version-utils = "^0.3.0" pydoc-markdown = "^3.10.3" pytest-timeout = "^1.4.2" From b235ad0635ebd09cb7e4ce66fbd8dad4b14bf81f Mon Sep 17 00:00:00 2001 From: carlad Date: Tue, 11 Jan 2022 17:53:26 +0100 Subject: [PATCH 2/9] use black 21.7b0 --- poetry.lock | 69 
++++++++++++++++++++++++++++++-------------------- pyproject.toml | 16 ++++++------ 2 files changed, 50 insertions(+), 35 deletions(-) diff --git a/poetry.lock b/poetry.lock index 37cd20a4458b..507655db6708 100644 --- a/poetry.lock +++ b/poetry.lock @@ -227,23 +227,27 @@ python-versions = ">=3.6" [[package]] name = "black" -version = "19.10b0" +version = "21.7b0" description = "The uncompromising code formatter." category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.6.2" [package.dependencies] appdirs = "*" -attrs = ">=18.1.0" -click = ">=6.5" -pathspec = ">=0.6,<1" -regex = "*" -toml = ">=0.9.4" -typed-ast = ">=1.4.0" +click = ">=7.1.2" +mypy-extensions = ">=0.4.3" +pathspec = ">=0.8.1,<1" +regex = ">=2020.1.8" +tomli = ">=0.2.6,<2.0.0" +typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\""} +typing-extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} [package.extras] -d = ["aiohttp (>=3.3.2)", "aiohttp-cors"] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.6.0)", "aiohttp-cors (>=0.4.0)"] +python2 = ["typed-ast (>=1.4.2)"] +uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "blis" @@ -714,7 +718,7 @@ python-dateutil = ">=2.7" [[package]] name = "fsspec" -version = "2021.11.1" +version = "2022.1.0" description = "File-system specification" category = "main" optional = false @@ -2369,7 +2373,7 @@ tqdm = "*" [[package]] name = "sanic" -version = "21.12.1" +version = "21.9.3" description = "A web server and web framework that's written to go fast. Build fast. Run fast." category = "main" optional = false @@ -2385,11 +2389,10 @@ uvloop = {version = ">=0.5.3", markers = "sys_platform != \"win32\" and implemen websockets = ">=10.0" [package.extras] -all = ["mistune (<2.0.0)", "flake8", "isort (>=5.0.0)", "sanic-testing (>=0.7.0)", "towncrier", "bandit", "m2r2", "mypy (>=0.901,<0.910)", "pytest-cov", "sphinx (>=2.1.2)", "coverage (==5.3)", "pytest-sugar", "cryptography", "tox", "beautifulsoup4", "black", "pytest-benchmark", "docutils", "gunicorn (==20.0.4)", "pytest-sanic", "uvicorn (<0.15.0)", "pygments", "chardet (>=3.0.0,<4.0.0)", "sphinx-rtd-theme (>=0.4.3)", "pytest (==6.2.5)", "types-ujson"] -dev = ["sanic-testing (>=0.7.0)", "pytest (==6.2.5)", "coverage (==5.3)", "gunicorn (==20.0.4)", "pytest-cov", "beautifulsoup4", "pytest-sanic", "pytest-sugar", "pytest-benchmark", "chardet (>=3.0.0,<4.0.0)", "flake8", "black", "isort (>=5.0.0)", "bandit", "mypy (>=0.901,<0.910)", "docutils", "pygments", "uvicorn (<0.15.0)", "cryptography", "tox", "towncrier", "types-ujson"] -docs = ["sphinx (>=2.1.2)", "sphinx-rtd-theme (>=0.4.3)", "docutils", "pygments", "m2r2", "mistune (<2.0.0)"] -ext = ["sanic-ext"] -test = ["sanic-testing (>=0.7.0)", "pytest (==6.2.5)", "coverage (==5.3)", "gunicorn (==20.0.4)", "pytest-cov", "beautifulsoup4", "pytest-sanic", "pytest-sugar", "pytest-benchmark", "chardet (>=3.0.0,<4.0.0)", "flake8", "black", "isort (>=5.0.0)", "bandit", "mypy (>=0.901,<0.910)", "docutils", "pygments", "uvicorn (<0.15.0)", "types-ujson"] +all = ["isort (>=5.0.0)", "beautifulsoup4", "chardet (>=3.0.0,<4.0.0)", "pytest-benchmark", "pytest-cov", "pytest-sugar", "m2r2", "pygments", "mypy (>=0.901)", "towncrier", "pytest (==5.2.1)", "uvicorn (<0.15.0)", "coverage (==5.3)", "pytest-sanic", "flake8", "sphinx (>=2.1.2)", "sanic-testing (>=0.7.0)", "bandit", "black", "gunicorn (==20.0.4)", "tox", "sphinx-rtd-theme (>=0.4.3)", "docutils", "types-ujson"] +dev = ["sanic-testing (>=0.7.0)", "pytest (==5.2.1)", "coverage (==5.3)", 
"gunicorn (==20.0.4)", "pytest-cov", "beautifulsoup4", "pytest-sanic", "pytest-sugar", "pytest-benchmark", "chardet (>=3.0.0,<4.0.0)", "flake8", "black", "isort (>=5.0.0)", "bandit", "mypy (>=0.901)", "docutils", "pygments", "uvicorn (<0.15.0)", "tox", "towncrier", "types-ujson"] +docs = ["sphinx (>=2.1.2)", "sphinx-rtd-theme (>=0.4.3)", "docutils", "pygments", "m2r2"] +test = ["sanic-testing (>=0.7.0)", "pytest (==5.2.1)", "coverage (==5.3)", "gunicorn (==20.0.4)", "pytest-cov", "beautifulsoup4", "pytest-sanic", "pytest-sugar", "pytest-benchmark", "chardet (>=3.0.0,<4.0.0)", "flake8", "black", "isort (>=5.0.0)", "bandit", "mypy (>=0.901)", "docutils", "pygments", "uvicorn (<0.15.0)", "types-ujson"] [[package]] name = "sanic-cors" @@ -2420,14 +2423,14 @@ docs = ["sphinx"] [[package]] name = "sanic-plugin-toolkit" -version = "1.2.0" +version = "1.2.1" description = "The all-in-one toolkit for creating powerful Sanic Plugins" category = "main" optional = false python-versions = ">=3.7,<4.0" [package.dependencies] -sanic = "*" +sanic = ">=21.3.1,<21.12.0" [[package]] name = "sanic-routing" @@ -2980,6 +2983,14 @@ category = "dev" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +[[package]] +name = "tomli" +version = "1.2.3" +description = "A lil' TOML parser" +category = "dev" +optional = false +python-versions = ">=3.6" + [[package]] name = "toolz" version = "0.11.2" @@ -3334,7 +3345,7 @@ transformers = ["transformers"] [metadata] lock-version = "1.1" python-versions = ">=3.7,<3.9" -content-hash = "8174efe1009da64964e0bcde91c9f737ca4e3b30a113d7cabef6837d09d63d40" +content-hash = "5b5b943ec26ad18173783d94698ff6c2b47643af4c52cc50bba6b00c8d4ccf0a" [metadata.files] absl-py = [ @@ -3445,8 +3456,8 @@ bidict = [ {file = "bidict-0.21.4.tar.gz", hash = "sha256:42c84ffbe6f8de898af6073b4be9ea7ccedcd78d3474aa844c54e49d5a079f6f"}, ] black = [ - {file = "black-19.10b0-py36-none-any.whl", hash = "sha256:1b30e59be925fafc1ee4565e5e08abef6b03fe455102883820fe5ee2e4734e0b"}, - {file = "black-19.10b0.tar.gz", hash = "sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539"}, + {file = "black-21.7b0-py3-none-any.whl", hash = "sha256:1c7aa6ada8ee864db745b22790a32f94b2795c253a75d6d9b5e439ff10d23116"}, + {file = "black-21.7b0.tar.gz", hash = "sha256:c8373c6491de9362e39271630b65b964607bc5c79c83783547d76c839b3aa219"}, ] blis = [ {file = "blis-0.7.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5812a7c04561ae7332cf730f57d9f82cbd12c5f86a5bfad66ee244e51d06266d"}, @@ -3763,8 +3774,8 @@ freezegun = [ {file = "freezegun-1.1.0.tar.gz", hash = "sha256:177f9dd59861d871e27a484c3332f35a6e3f5d14626f2bf91be37891f18927f3"}, ] fsspec = [ - {file = "fsspec-2021.11.1-py3-none-any.whl", hash = "sha256:bcb136caa37e1470dd8314a7d3917cb9b25dd9da44c10d36df556ab4ef038185"}, - {file = "fsspec-2021.11.1.tar.gz", hash = "sha256:03683e606651d5e4bd9180525d57477bd5430e5dc68d2e459835dc14cecc3dd4"}, + {file = "fsspec-2022.1.0-py3-none-any.whl", hash = "sha256:256e2be44e62430c9ca8dac2e480384b00a3c52aef4e2b0b7204163fdc861d37"}, + {file = "fsspec-2022.1.0.tar.gz", hash = "sha256:0bdd519bbf4d8c9a1d893a50b5ebacc89acd0e1fe0045d2f7b0e0c1af5990edc"}, ] future = [ {file = "future-0.18.2.tar.gz", hash = "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"}, @@ -5125,8 +5136,8 @@ sacremoses = [ {file = "sacremoses-0.0.47-py2.py3-none-any.whl", hash = "sha256:7622c6e9fe12d45b7acf4528451bd054c1557c1f6779398f9cd9f28332d92a0b"}, ] sanic = [ - {file = "sanic-21.12.1-py3-none-any.whl", hash = 
"sha256:53230ba1a1081e6b075e58339a47c07d2bf618119fa40ba88c69fe57c99126f1"}, - {file = "sanic-21.12.1.tar.gz", hash = "sha256:6f15ecfef47d4288aac04d8741833a7701cd94f2d4a28fc5667ce2844363152d"}, + {file = "sanic-21.9.3-py3-none-any.whl", hash = "sha256:354fbc9e5382df23989752067a57d042aef99bf5a8e85593a3e0112276bff9e8"}, + {file = "sanic-21.9.3.tar.gz", hash = "sha256:5edb41d0d30cf47a25cf991b7465f53ea161b43e3cd20d8985e68ecf7fb7ad24"}, ] sanic-cors = [ {file = "Sanic-Cors-1.0.1.tar.gz", hash = "sha256:ac5d40bd45022d21296887d2238891d0ad67308ffba55be809df0151d36071b5"}, @@ -5137,8 +5148,8 @@ sanic-jwt = [ {file = "sanic_jwt-1.7.0-py3-none-any.whl", hash = "sha256:1dbcfec93cef77a41835aa38eae27780fc4255b9c1b067674ce2f0d2f5d45353"}, ] sanic-plugin-toolkit = [ - {file = "sanic-plugin-toolkit-1.2.0.tar.gz", hash = "sha256:5d477e6c1832968d43c7112e1a445afc98153fa05236c4db32b68e2cfff86001"}, - {file = "sanic_plugin_toolkit-1.2.0-py3-none-any.whl", hash = "sha256:3ab62af7c2d3e73d37d217976ec11f3683bde588d647e883a42763a894058b50"}, + {file = "sanic-plugin-toolkit-1.2.1.tar.gz", hash = "sha256:9f6eeca2e28b915dba4be8584ebb5a5f76a3f6895fe34572042ec275b13e41e1"}, + {file = "sanic_plugin_toolkit-1.2.1-py3-none-any.whl", hash = "sha256:aabea0dc1fd71969567c6a0fa419b55a727c0a5be372f166560e0cdbb9c30abb"}, ] sanic-routing = [ {file = "sanic-routing-0.7.2.tar.gz", hash = "sha256:139ce88b3f054e7aa336e2ecc8459837092b103b275d3a97609a34092c55374d"}, @@ -5502,6 +5513,10 @@ toml = [ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, ] +tomli = [ + {file = "tomli-1.2.3-py3-none-any.whl", hash = "sha256:e3069e4be3ead9668e21cb9b074cd948f7b3113fd9c8bba083f48247aab8b11c"}, + {file = "tomli-1.2.3.tar.gz", hash = "sha256:05b6166bff487dc068d322585c7ea4ef78deed501cc124060e0f238e89a9231f"}, +] toolz = [ {file = "toolz-0.11.2-py3-none-any.whl", hash = "sha256:a5700ce83414c64514d82d60bcda8aabfde092d1c1a8663f9200c07fdcc6da8f"}, {file = "toolz-0.11.2.tar.gz", hash = "sha256:6b312d5e15138552f1bda8a4e66c30e236c831b612b2bf0005f8a1df10a4bc33"}, diff --git a/pyproject.toml b/pyproject.toml index 637fe6c8af46..6e41d79ee275 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -93,7 +93,7 @@ pytz = ">=2019.1,<2022.0" rasa-sdk = "^3.0.0" colorclass = "~2.2" terminaltables = "~3.1.0" -sanic = "^21.9.3" +sanic = "^21.6.0" sanic-cors = "^1.0.0" sanic-jwt = "^1.6.0" cloudpickle = ">=1.2,<1.7" @@ -132,24 +132,24 @@ keras = "<2.7.0" sanic-routing = "^0.7.2" [tool.poetry.dev-dependencies] -pytest-cov = "^2.12.1" +pytest-cov = "^2.10.0" pytest-asyncio = "^0.14.0" pytest-xdist = "^2.2.1" -pytest = "^6.2.5" +pytest = "^6.2.2" freezegun = "^1.0.0" responses = "^0.13.3" aioresponses = "^0.7.2" moto = "~=2.2.6" fakeredis = "^1.5.2" mongomock = "^3.18.0" -black = "^19.10b0" -flake8 = "^3.9.2" -flake8-docstrings = "^1.6.0" +black = "21.7b0" +flake8 = "^3.8.3" +flake8-docstrings = "^1.5.0" google-cloud-storage = "^1.40.0" azure-storage-blob = "<12.9.0" -coveralls = "^3.1.0" +coveralls = "^3.0.1" towncrier = "^21.3.0" -toml = "^0.10.2" +toml = "^0.10.0" pep440-version-utils = "^0.3.0" pydoc-markdown = "^3.10.3" pytest-timeout = "^1.4.2" From be72a3ca8c3a321766abe6ea151fdb6b595106fc Mon Sep 17 00:00:00 2001 From: carlad Date: Wed, 12 Jan 2022 11:55:47 +0100 Subject: [PATCH 3/9] apply docstring reformatting --- .pre-commit-config.yaml | 5 +- rasa/core/actions/action.py | 12 +- 
rasa/engine/caching.py | 6 +- rasa/nlu/emulators/dialogflow.py | 2 +- rasa/shared/core/generator.py | 7 +- rasa/shared/core/slots.py | 21 ++- rasa/shared/core/trackers.py | 5 +- .../story_writer/yaml_story_writer.py | 5 +- rasa/shared/core/training_data/structures.py | 10 +- tests/core/evaluation/test_marker.py | 34 ++++- tests/core/evaluation/test_marker_stats.py | 32 ++++- .../test_single_state_featurizers.py | 37 ++++-- tests/core/test_processor.py | 43 ++++-- .../test_default_recipe_validator.py | 125 +++++++++++++++--- tests/nlu/classifiers/test_diet_classifier.py | 48 +++++-- tests/nlu/featurizers/test_lm_featurizer.py | 31 +++-- tests/nlu/selectors/test_selectors.py | 40 ++++-- tests/nlu/test_train.py | 26 +++- tests/utils/tensorflow/test_models.py | 8 +- tests/utils/tensorflow/test_rasa_layers.py | 105 +++++++++++---- 20 files changed, 457 insertions(+), 145 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c8c6025ff85e..d2870c46d47d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,10 +1,10 @@ repos: - repo: https://github.com/ambv/black - rev: 19.10b0 + rev: 21.12b0 hooks: - id: black - repo: https://github.com/thlorenz/doctoc - rev: master + rev: v2.1.0 hooks: - id: doctoc files: "CONTRIBUTING.md" @@ -16,3 +16,4 @@ repos: language: system types: [python] pass_filenames: false + \ No newline at end of file diff --git a/rasa/core/actions/action.py b/rasa/core/actions/action.py index 34cffc065d2b..3250be71d3ce 100644 --- a/rasa/core/actions/action.py +++ b/rasa/core/actions/action.py @@ -923,9 +923,9 @@ async def run( intent_to_affirm = latest_message.intent.get(INTENT_NAME_KEY) - intent_ranking: List["IntentPrediction"] = latest_message.parse_data.get( - INTENT_RANKING_KEY - ) or [] + intent_ranking: List["IntentPrediction"] = ( + latest_message.parse_data.get(INTENT_RANKING_KEY) or [] + ) if ( intent_to_affirm == DEFAULT_NLU_FALLBACK_INTENT_NAME and len(intent_ranking) > 1 @@ -1076,7 +1076,11 @@ async def _execute_custom_action( return [], executed_custom_actions slot_events = await self._run_custom_action( - custom_action, output_channel, nlg, tracker, domain, + custom_action, + output_channel, + nlg, + tracker, + domain, ) executed_custom_actions.add(custom_action) diff --git a/rasa/engine/caching.py b/rasa/engine/caching.py index d5301385afd9..ea934367eb2a 100644 --- a/rasa/engine/caching.py +++ b/rasa/engine/caching.py @@ -227,9 +227,9 @@ def _drop_cache_entries_from_incompatible_versions(self) -> None: def _find_incompatible_cache_entries(self) -> List[LocalTrainingCache.CacheEntry]: with self._sessionmaker() as session: query_for_cache_entries = sa.select(self.CacheEntry) - all_entries: List[LocalTrainingCache.CacheEntry] = session.execute( - query_for_cache_entries - ).scalars().all() + all_entries: List[LocalTrainingCache.CacheEntry] = ( + session.execute(query_for_cache_entries).scalars().all() + ) return [ entry diff --git a/rasa/nlu/emulators/dialogflow.py b/rasa/nlu/emulators/dialogflow.py index 89d4a4647735..6e7665bb8858 100644 --- a/rasa/nlu/emulators/dialogflow.py +++ b/rasa/nlu/emulators/dialogflow.py @@ -23,7 +23,7 @@ class DialogflowEmulator(Emulator): """ def normalise_response_json(self, data: Dict[Text, Any]) -> Dict[Text, Any]: - """"Transform response JSON to DialogFlow format. + """Transform response JSON to DialogFlow format. Args: data: input JSON data as a dictionary. 
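
Note on the reformatting hunks above and below: most of the churn in this commit is mechanical. black 21.x honours the "magic trailing comma" — a call or literal whose last element is followed by a comma is pinned to one element per line — whereas black 19.10b0 collapsed such short calls back onto a single line. A minimal sketch of the two layouts (the `configure` helper below is hypothetical, used only for illustration; it is not taken from this diff):

    def configure(name, mappings, initial_value, value_reset_delay):
        """Hypothetical helper, used only to illustrate the formatting."""
        return (name, mappings, initial_value, value_reset_delay)

    # No trailing comma: black keeps the call on one line while it fits
    # within the line-length limit.
    configure("slot", {}, None, 0)

    # Magic trailing comma: black 21.x keeps one argument per line, which
    # is the shape of most "+" hunks in this commit, including the exploded
    # @pytest.mark.parametrize argument lists in the test diffs further down.
    configure(
        "slot",
        {},
        None,
        0,
    )

One caveat worth flagging: patch 2/9 pins black to 21.7b0 in pyproject.toml, while the pre-commit hook above is bumped to rev 21.12b0; the two versions may disagree at the margins, so locally formatted code and CI hook output are not guaranteed to match.
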
diff --git a/rasa/shared/core/generator.py b/rasa/shared/core/generator.py index 6c6120e83d62..7c1022b215c2 100644 --- a/rasa/shared/core/generator.py +++ b/rasa/shared/core/generator.py @@ -85,7 +85,9 @@ def from_events( return tracker def past_states_for_hashing( - self, domain: Domain, omit_unset_slots: bool = False, + self, + domain: Domain, + omit_unset_slots: bool = False, ) -> Deque[FrozenState]: """Generates and caches the past states of this tracker based on the history. @@ -190,8 +192,7 @@ def _append_current_state(self) -> None: self._states_for_hashing.append(frozen_state) def update(self, event: Event, skip_states: bool = False) -> None: - """Modify the state of the tracker according to an ``Event``. """ - + """Modify the state of the tracker according to an ``Event``.""" # if `skip_states` is `True`, this function behaves exactly like the # normal update of the `DialogueStateTracker` diff --git a/rasa/shared/core/slots.py b/rasa/shared/core/slots.py index bc63ca25a3da..813fe308f18c 100644 --- a/rasa/shared/core/slots.py +++ b/rasa/shared/core/slots.py @@ -182,7 +182,11 @@ def __init__( UserWarning, if initial_value is outside the min-max range. """ super().__init__( - name, mappings, initial_value, value_reset_delay, influence_conversation, + name, + mappings, + initial_value, + value_reset_delay, + influence_conversation, ) self.max_value = max_value self.min_value = min_value @@ -244,8 +248,7 @@ def _feature_dimensionality(self) -> int: def bool_from_any(x: Any) -> bool: - """ Converts bool/float/int/str to bool or raises error """ - + """Converts bool/float/int/str to bool or raises error.""" if isinstance(x, bool): return x elif isinstance(x, (float, int)): @@ -310,7 +313,11 @@ def __init__( ) -> None: """Creates a `Categorical Slot` (see parent class for detailed docstring).""" super().__init__( - name, mappings, initial_value, value_reset_delay, influence_conversation, + name, + mappings, + initial_value, + value_reset_delay, + influence_conversation, ) if values and None in values: rasa.shared.utils.io.raise_warning( @@ -422,7 +429,11 @@ def __init__( ) super().__init__( - name, mappings, initial_value, value_reset_delay, influence_conversation, + name, + mappings, + initial_value, + value_reset_delay, + influence_conversation, ) def __eq__(self, other: Any) -> bool: diff --git a/rasa/shared/core/trackers.py b/rasa/shared/core/trackers.py index 969f3b974157..d7a74ed0c74f 100644 --- a/rasa/shared/core/trackers.py +++ b/rasa/shared/core/trackers.py @@ -579,7 +579,8 @@ def _undo_till_previous_loop_execution( break if isinstance( - e, (ActionExecuted, UserUttered, DefinePrevUserUtteredFeaturization), + e, + (ActionExecuted, UserUttered, DefinePrevUserUtteredFeaturization), ): del done_events[-1 - offset] else: @@ -640,7 +641,7 @@ def as_dialogue(self) -> Dialogue: return Dialogue(self.sender_id, list(self.events)) def update(self, event: Event, domain: Optional[Domain] = None) -> None: - """Modify the state of the tracker according to an ``Event``. 
""" + """Modify the state of the tracker according to an ``Event``.""" if not isinstance(event, Event): # pragma: no cover raise ValueError("event to log must be an instance of a subclass of Event.") diff --git a/rasa/shared/core/training_data/story_writer/yaml_story_writer.py b/rasa/shared/core/training_data/story_writer/yaml_story_writer.py index ae25a94d7a96..5b77e01e7cc3 100644 --- a/rasa/shared/core/training_data/story_writer/yaml_story_writer.py +++ b/rasa/shared/core/training_data/story_writer/yaml_story_writer.py @@ -56,7 +56,7 @@ class YAMLStoryWriter(StoryWriter): - """Writes Core training data into a file in a YAML format. """ + """Writes Core training data into a file in a YAML format.""" def dumps( self, @@ -229,7 +229,8 @@ def process_user_utterance( [(entity["entity"], entity["value"])] ) entity_map.yaml_add_eol_comment( - commented_entity, entity["entity"], + commented_entity, + entity["entity"], ) entities.append(entity_map) else: diff --git a/rasa/shared/core/training_data/structures.py b/rasa/shared/core/training_data/structures.py index 40461e6ba3bd..685b6f024d52 100644 --- a/rasa/shared/core/training_data/structures.py +++ b/rasa/shared/core/training_data/structures.py @@ -305,9 +305,9 @@ def __repr__(self) -> Text: class RuleStep(StoryStep): - """A Special type of StoryStep representing a Rule. """ + """A Special type of StoryStep representing a Rule.""" - def __init__( + def __init__( # noqa: D107 self, block_name: Optional[Text] = None, start_checkpoints: Optional[List[Checkpoint]] = None, @@ -351,8 +351,7 @@ def __repr__(self) -> Text: ) def get_rules_condition(self) -> List[Event]: - """Returns a list of events forming a condition of the Rule. """ - + """Returns a list of events forming a condition of the Rule.""" return [ event for event_id, event in enumerate(self.events) @@ -360,8 +359,7 @@ def get_rules_condition(self) -> List[Event]: ] def get_rules_events(self) -> List[Event]: - """Returns a list of events forming the Rule, that are not conditions. 
""" - + """Returns a list of events forming the Rule, that are not conditions.""" return [ event for event_id, event in enumerate(self.events) diff --git a/tests/core/evaluation/test_marker.py b/tests/core/evaluation/test_marker.py index 9cd9f7f347e4..ea6dc9a5a634 100644 --- a/tests/core/evaluation/test_marker.py +++ b/tests/core/evaluation/test_marker.py @@ -332,7 +332,12 @@ def test_operators_nested_simple(): marker = AndMarker( markers=[ SlotSetMarker("s1"), - OrMarker([IntentDetectedMarker("4"), IntentDetectedMarker("6"),]), + OrMarker( + [ + IntentDetectedMarker("4"), + IntentDetectedMarker("6"), + ] + ), ], name="marker_name", ) @@ -631,13 +636,16 @@ def test_marker_validation_raises(config: Any): Marker.from_config(config) -def test_marker_from_path_only_reads_yamls(tmp_path: Path,): +def test_marker_from_path_only_reads_yamls( + tmp_path: Path, +): suffixes = [("yaml", True), ("yml", True), ("yaeml", False), ("config", False)] for idx, (suffix, allowed) in enumerate(suffixes): config = {f"marker-{idx}": {IntentDetectedMarker.positive_tag(): "intent"}} config_file = tmp_path / f"config-{idx}.{suffix}" rasa.shared.utils.io.write_yaml( - data=config, target=config_file, + data=config, + target=config_file, ) loaded = Marker.from_path(tmp_path) assert len(loaded.sub_markers) == sum(allowed for _, allowed in suffixes) @@ -660,7 +668,8 @@ def test_marker_from_path_adds_special_or_marker(tmp_path: Path, configs: Any): yaml_file = tmp_path / "config.yml" rasa.shared.utils.io.write_yaml( - data=configs, target=yaml_file, + data=configs, + target=yaml_file, ) loaded = Marker.from_path(tmp_path) assert isinstance(loaded, OrMarker) @@ -719,13 +728,26 @@ def test_marker_from_path_raises( AndMarker( markers=[ SlotSetMarker("s1"), - OrMarker([IntentDetectedMarker("4"), IntentDetectedMarker("6"),]), + OrMarker( + [ + IntentDetectedMarker("4"), + IntentDetectedMarker("6"), + ] + ), ], ), 3, ), (SlotSetMarker("s1"), 1), - (AndMarker(markers=[SlotSetMarker("s1"), IntentDetectedMarker("6"),],), 2), + ( + AndMarker( + markers=[ + SlotSetMarker("s1"), + IntentDetectedMarker("6"), + ], + ), + 2, + ), ], ) def test_marker_depth(marker: Marker, expected_depth: int): diff --git a/tests/core/evaluation/test_marker_stats.py b/tests/core/evaluation/test_marker_stats.py index a734a3430715..0cc1b8d9716d 100644 --- a/tests/core/evaluation/test_marker_stats.py +++ b/tests/core/evaluation/test_marker_stats.py @@ -139,10 +139,20 @@ def test_process_results_per_session(seed: int): stats.session_results[marker][stat_name][idx], stat_value ) for idx in range(num_sessions): - assert stats.session_identifier[idx] == (sender_ids[idx], session_indices[idx],) + assert stats.session_identifier[idx] == ( + sender_ids[idx], + session_indices[idx], + ) -@pytest.mark.parametrize("seed", [2345, 5654, 2345234,]) +@pytest.mark.parametrize( + "seed", + [ + 2345, + 5654, + 2345234, + ], +) def test_process_results_overall(seed: int): rng = np.random.default_rng(seed=seed) ( @@ -176,7 +186,14 @@ def test_process_results_overall(seed: int): assert stats.num_preceding_user_turns_collected[marker] == concatenated_numbers -@pytest.mark.parametrize("seed", [2345, 5654, 2345234,]) +@pytest.mark.parametrize( + "seed", + [ + 2345, + 5654, + 2345234, + ], +) def test_overall_statistics_to_csv(tmp_path: Path, seed: int): rng = np.random.default_rng(seed=seed) ( @@ -253,7 +270,14 @@ def test_overall_statistics_to_csv(tmp_path: Path, seed: int): row_idx += 1 -@pytest.mark.parametrize("seed", [2345, 5654, 2345234,]) +@pytest.mark.parametrize( + 
"seed", + [ + 2345, + 5654, + 2345234, + ], +) def test_per_session_statistics_to_csv(tmp_path: Path, seed: int): rng = np.random.default_rng(seed=seed) diff --git a/tests/core/featurizers/test_single_state_featurizers.py b/tests/core/featurizers/test_single_state_featurizers.py index 6587cc7a8947..ca144b7e98a8 100644 --- a/tests/core/featurizers/test_single_state_featurizers.py +++ b/tests/core/featurizers/test_single_state_featurizers.py @@ -222,13 +222,15 @@ def test_encode_state__without_lookup(action_name: Text): def dummy_features( fill_value: int, units: int, attribute: Text, type: Text, is_sparse: bool ) -> Features: - """Create some dummy `Features` with the desired properties. - """ + """Create some dummy `Features` with the desired properties.""" matrix = np.full(shape=(1, units), fill_value=fill_value) if is_sparse: matrix = scipy.sparse.coo_matrix(matrix) return Features( - features=matrix, attribute=attribute, feature_type=type, origin="whatever", + features=matrix, + attribute=attribute, + feature_type=type, + origin="whatever", ) @@ -311,8 +313,15 @@ def test_encode_state__with_lookup__looksup_or_creates_features(action_name: Tex action_text = "throw a ball" intent = "inform" state = { - USER: {TEXT: text, INTENT: intent, ENTITIES: entity_name_list,}, - PREVIOUS_ACTION: {ACTION_NAME: action_name, ACTION_TEXT: action_text,}, + USER: { + TEXT: text, + INTENT: intent, + ENTITIES: entity_name_list, + }, + PREVIOUS_ACTION: { + ACTION_NAME: action_name, + ACTION_TEXT: action_text, + }, ACTIVE_LOOP: {"name": "active_loop_4"}, SLOTS: {"slot_1": (1.0,)}, } @@ -374,7 +383,10 @@ def test_encode_state__with_lookup__looksup_or_creates_features(action_name: Tex precomputations.add(Message(data={ACTION_NAME: action_name})) # encode the state - encoded = f.encode_state(state, precomputations=precomputations,) + encoded = f.encode_state( + state, + precomputations=precomputations, + ) # check all the features are encoded and *_text features are encoded by a # dense featurizer @@ -459,7 +471,8 @@ def test_encode_entities__with_entity_roles_and_groups(): # encode! encoded = f.encode_entities( - entity_data={TEXT: text, ENTITIES: entities}, precomputations=precomputations, + entity_data={TEXT: text, ENTITIES: entities}, + precomputations=precomputations, ) # check @@ -519,7 +532,10 @@ def test_encode_entities__with_bilou_entity_roles_and_groups(): # encode! encoded = f.encode_entities( - {TEXT: text, ENTITIES: entities,}, + { + TEXT: text, + ENTITIES: entities, + }, precomputations=precomputations, bilou_tagging=True, ) @@ -552,7 +568,10 @@ def test_encode_entities__with_bilou_entity_roles_and_groups(): # encode! 
encoded = f.encode_entities( - {TEXT: text, ENTITIES: entities,}, + { + TEXT: text, + ENTITIES: entities, + }, precomputations=precomputations, bilou_tagging=True, ) diff --git a/tests/core/test_processor.py b/tests/core/test_processor.py index 3a6ebb6d9a64..775e841b710f 100644 --- a/tests/core/test_processor.py +++ b/tests/core/test_processor.py @@ -590,7 +590,8 @@ async def test_update_tracker_session( async def test_update_tracker_session_with_metadata( - default_processor: MessageProcessor, monkeypatch: MonkeyPatch, + default_processor: MessageProcessor, + monkeypatch: MonkeyPatch, ): model_id = default_processor.model_metadata.model_id sender_id = uuid.uuid4().hex @@ -607,7 +608,11 @@ async def test_update_tracker_session_with_metadata( events = list(tracker.events) assert events[0] == with_model_id( - SlotSet(SESSION_START_METADATA_SLOT, message_metadata,), model_id + SlotSet( + SESSION_START_METADATA_SLOT, + message_metadata, + ), + model_id, ) assert tracker.slots[SESSION_START_METADATA_SLOT].value == message_metadata @@ -876,7 +881,10 @@ async def test_handle_message_with_session_start( SlotSet(entity, slot_1[entity]), DefinePrevUserUtteredFeaturization(False), ActionExecuted("utter_greet"), - BotUttered("hey there Core!", metadata={"utter_action": "utter_greet"},), + BotUttered( + "hey there Core!", + metadata={"utter_action": "utter_greet"}, + ), ActionExecuted(ACTION_LISTEN_NAME), ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted(), @@ -932,7 +940,10 @@ async def test_should_predict_another_action( async def test_action_unlikely_intent_metadata(default_processor: MessageProcessor): tracker = DialogueStateTracker.from_events( - "some-sender", evts=[ActionExecuted(ACTION_LISTEN_NAME),], + "some-sender", + evts=[ + ActionExecuted(ACTION_LISTEN_NAME), + ], ) domain = Domain.empty() metadata = {"key1": 1, "key2": "2"} @@ -1002,7 +1013,10 @@ async def test_restart_triggers_session_start( SlotSet(entity, slot_1[entity]), DefinePrevUserUtteredFeaturization(use_text_for_featurization=False), ActionExecuted("utter_greet"), - BotUttered("hey there name1!", metadata={"utter_action": "utter_greet"},), + BotUttered( + "hey there name1!", + metadata={"utter_action": "utter_greet"}, + ), ActionExecuted(ACTION_LISTEN_NAME), UserUttered("/restart", {INTENT_NAME_KEY: "restart", "confidence": 1.0}), DefinePrevUserUtteredFeaturization(use_text_for_featurization=False), @@ -1165,7 +1179,10 @@ async def mocked_run(*args: Any, **kwargs: Any) -> List[Event]: ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted(), ActionExecuted(ACTION_LISTEN_NAME), - UserUttered(user_message, intent={"name": "greet"},), + UserUttered( + user_message, + intent={"name": "greet"}, + ), ActionExecutionRejected(ACTION_LISTEN_NAME), ], model_id, @@ -1175,7 +1192,8 @@ async def mocked_run(*args: Any, **kwargs: Any) -> List[Event]: async def test_logging_of_end_to_end_action( - default_processor: MessageProcessor, monkeypatch: MonkeyPatch, + default_processor: MessageProcessor, + monkeypatch: MonkeyPatch, ): model_id = default_processor.model_metadata.model_id end_to_end_action = "hi, how are you?" 
@@ -1228,7 +1246,10 @@ def combine_predictions( ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted(), ActionExecuted(ACTION_LISTEN_NAME), - UserUttered(user_message, intent={"name": "greet"},), + UserUttered( + user_message, + intent={"name": "greet"}, + ), ActionExecuted(action_text=end_to_end_action), BotUttered("hi, how are you?", {}, {}, 123), ActionExecuted(ACTION_LISTEN_NAME), @@ -1481,7 +1502,11 @@ def test_get_tracker_adds_model_id(default_processor: MessageProcessor): async def test_processor_e2e_slot_set(e2e_bot_agent: Agent, caplog: LogCaptureFixture): processor = e2e_bot_agent.processor - message = UserMessage("I am feeling sad.", CollectingOutputChannel(), "test",) + message = UserMessage( + "I am feeling sad.", + CollectingOutputChannel(), + "test", + ) with caplog.at_level(logging.DEBUG): await processor.handle_message(message) diff --git a/tests/graph_components/validators/test_default_recipe_validator.py b/tests/graph_components/validators/test_default_recipe_validator.py index 74738089eb7e..2d431d6d1c62 100644 --- a/tests/graph_components/validators/test_default_recipe_validator.py +++ b/tests/graph_components/validators/test_default_recipe_validator.py @@ -115,10 +115,18 @@ def _test_validation_warnings_with_default_configs( @pytest.mark.parametrize( - "component_type, warns", [(ResponseSelector, False,), (None, True)] + "component_type, warns", + [ + ( + ResponseSelector, + False, + ), + (None, True), + ], ) def test_nlu_warn_if_training_examples_with_intent_response_key_are_unused( - component_type: Type[GraphComponent], warns: bool, + component_type: Type[GraphComponent], + warns: bool, ): messages = [ Message( @@ -154,7 +162,9 @@ def test_nlu_warn_if_training_examples_with_intent_response_key_are_unused( if component_type: component_types.append(component_type) _test_validation_warnings_with_default_configs( - training_data=training_data, component_types=component_types, warnings=warnings, + training_data=training_data, + component_types=component_types, + warnings=warnings, ) @@ -163,7 +173,8 @@ def test_nlu_warn_if_training_examples_with_intent_response_key_are_unused( [(extractor, False) for extractor in TRAINABLE_EXTRACTORS] + [(None, True)], ) def test_nlu_warn_if_training_examples_with_entities_are_unused( - component_type: Type[GraphComponent], warns: bool, + component_type: Type[GraphComponent], + warns: bool, ): messages = [ Message( @@ -192,7 +203,9 @@ def test_nlu_warn_if_training_examples_with_entities_are_unused( if component_type: component_types.append(component_type) _test_validation_warnings_with_default_configs( - training_data=training_data, component_types=component_types, warnings=warnings, + training_data=training_data, + component_types=component_types, + warnings=warnings, ) @@ -209,7 +222,9 @@ def test_nlu_warn_if_training_examples_with_entities_are_unused( ], ) def test_nlu_warn_if_training_examples_with_entity_roles_are_unused( - component_type: Type[GraphComponent], role_instead_of_group: bool, warns: bool, + component_type: Type[GraphComponent], + role_instead_of_group: bool, + warns: bool, ): messages = [ Message( @@ -243,13 +258,19 @@ def test_nlu_warn_if_training_examples_with_entity_roles_are_unused( if component_type: component_types.append(component_type) _test_validation_warnings_with_default_configs( - training_data=training_data, component_types=component_types, warnings=warnings, + training_data=training_data, + component_types=component_types, + warnings=warnings, ) @pytest.mark.parametrize( "component_type, warns", - 
[(RegexFeaturizer, False), (RegexEntityExtractor, False), (None, True),], + [ + (RegexFeaturizer, False), + (RegexEntityExtractor, False), + (None, True), + ], ) def test_nlu_warn_if_regex_features_are_not_used( component_type: Type[GraphComponent], warns: bool @@ -280,7 +301,10 @@ def test_nlu_warn_if_regex_features_are_not_used( + [ (featurizer, consumer, False, False) for consumer in [DIETClassifier, CRFEntityExtractor] - for featurizer in [RegexFeaturizer, RegexEntityExtractor,] + for featurizer in [ + RegexFeaturizer, + RegexEntityExtractor, + ] ], ) def test_nlu_warn_if_lookup_table_is_not_used( @@ -326,7 +350,13 @@ def test_nlu_warn_if_lookup_table_is_not_used( [ SchemaNode({}, WhitespaceTokenizer, "", "", {}), SchemaNode({}, RegexFeaturizer, "", "", {}), - SchemaNode({}, CRFEntityExtractor, "", "", {"features": [["pos"]]},), + SchemaNode( + {}, + CRFEntityExtractor, + "", + "", + {"features": [["pos"]]}, + ), ], True, ), @@ -348,7 +378,13 @@ def test_nlu_warn_if_lookup_table_is_not_used( [ SchemaNode({}, WhitespaceTokenizer, "", "", {}), SchemaNode({}, RegexFeaturizer, "", "", {}), - SchemaNode({}, CRFEntityExtractor, "", "", {"features": [["pos"]]},), + SchemaNode( + {}, + CRFEntityExtractor, + "", + "", + {"features": [["pos"]]}, + ), SchemaNode( {}, CRFEntityExtractor, @@ -393,8 +429,21 @@ def test_nlu_warn_if_lookup_table_and_crf_extractor_pattern_feature_mismatch( @pytest.mark.parametrize( "components, warns", [ - ([WhitespaceTokenizer, CRFEntityExtractor,], True,), - ([WhitespaceTokenizer, CRFEntityExtractor, EntitySynonymMapper,], False,), + ( + [ + WhitespaceTokenizer, + CRFEntityExtractor, + ], + True, + ), + ( + [ + WhitespaceTokenizer, + CRFEntityExtractor, + EntitySynonymMapper, + ], + False, + ), ], ) def test_nlu_warn_if_entity_synonyms_unused( @@ -494,7 +543,14 @@ def test_nlu_do_not_raise_if_trainable_tokenizer(): ], True, ), - ([WhitespaceTokenizer, LexicalSyntacticFeaturizer, DIETClassifier,], False,), + ( + [ + WhitespaceTokenizer, + LexicalSyntacticFeaturizer, + DIETClassifier, + ], + False, + ), ], ) def test_nlu_warn_of_competing_extractors( @@ -531,12 +587,20 @@ def test_nlu_warn_of_competing_extractors( True, ), ( - [WhitespaceTokenizer, LexicalSyntacticFeaturizer, RegexEntityExtractor,], + [ + WhitespaceTokenizer, + LexicalSyntacticFeaturizer, + RegexEntityExtractor, + ], "data/test/overlapping_regex_entities.yml", False, ), ( - [WhitespaceTokenizer, LexicalSyntacticFeaturizer, DIETClassifier,], + [ + WhitespaceTokenizer, + LexicalSyntacticFeaturizer, + DIETClassifier, + ], "data/test/overlapping_regex_entities.yml", False, ), @@ -558,7 +622,9 @@ def test_nlu_warn_of_competition_with_regex_extractor( data_path: Text, should_warn: bool, ): - importer = TrainingDataImporter.load_from_dict(training_data_paths=[data_path],) + importer = TrainingDataImporter.load_from_dict( + training_data_paths=[data_path], + ) # there are no domain files for the above examples, so: monkeypatch.setattr(Domain, "check_missing_responses", lambda *args, **kwargs: None) @@ -690,7 +756,12 @@ def test_nlu_raise_if_featurizers_are_not_compatible( @pytest.mark.parametrize( - "policy_type", [TEDPolicy, RulePolicy, MemoizationPolicy,], + "policy_type", + [ + TEDPolicy, + RulePolicy, + MemoizationPolicy, + ], ) def test_core_warn_if_data_but_no_policy( monkeypatch: MonkeyPatch, policy_type: Optional[Type[Policy]] @@ -753,7 +824,9 @@ def test_core_warn_if_data_but_no_policy( ], ) def test_core_warn_if_no_rule_policy( - monkeypatch: MonkeyPatch, policy_types: List[Type[Policy]], 
should_warn: bool, + monkeypatch: MonkeyPatch, + policy_types: List[Type[Policy]], + should_warn: bool, ): graph_schema = GraphSchema( { @@ -795,7 +868,9 @@ def test_core_warn_if_no_rule_policy( ], ) def test_core_raise_if_domain_contains_form_names_but_no_rule_policy_given( - monkeypatch: MonkeyPatch, policy_types: List[Type[Policy]], should_raise: bool, + monkeypatch: MonkeyPatch, + policy_types: List[Type[Policy]], + should_raise: bool, ): domain_with_form = Domain.from_dict( {KEY_FORMS: {"some-form": {"required_slots": []}}} @@ -919,7 +994,9 @@ def test_core_warn_if_policy_priorities_are_not_unique( @pytest.mark.parametrize("policy_type_consuming_rule_data", [RulePolicy]) -def test_core_warn_if_rule_data_missing(policy_type_consuming_rule_data: Type[Policy],): +def test_core_warn_if_rule_data_missing( + policy_type_consuming_rule_data: Type[Policy], +): importer = TrainingDataImporter.load_from_dict( domain_path="data/test_e2ebot/domain.yml", @@ -945,7 +1022,11 @@ def test_core_warn_if_rule_data_missing(policy_type_consuming_rule_data: Type[Po @pytest.mark.parametrize( - "policy_type_not_consuming_rule_data", [TEDPolicy, MemoizationPolicy,], + "policy_type_not_consuming_rule_data", + [ + TEDPolicy, + MemoizationPolicy, + ], ) def test_core_warn_if_rule_data_unused( policy_type_not_consuming_rule_data: Type[Policy], diff --git a/tests/nlu/classifiers/test_diet_classifier.py b/tests/nlu/classifiers/test_diet_classifier.py index c9a49b80f59b..80a75edbc716 100644 --- a/tests/nlu/classifiers/test_diet_classifier.py +++ b/tests/nlu/classifiers/test_diet_classifier.py @@ -223,7 +223,9 @@ def test_compute_default_label_features(): ], ) def test_check_labels_features_exist( - messages: List[Message], expected: bool, create_diet: Callable[..., DIETClassifier], + messages: List[Message], + expected: bool, + create_diet: Callable[..., DIETClassifier], ): attribute = TEXT classifier = create_diet({}) @@ -258,8 +260,12 @@ def test_check_labels_features_exist( ), ( [ - Message(data={TEXT: "test a", INTENT: "intent a"},), - Message(data={TEXT: "test b", INTENT: "intent b"},), + Message( + data={TEXT: "test a", INTENT: "intent a"}, + ), + Message( + data={TEXT: "test b", INTENT: "intent b"}, + ), ], False, ), @@ -341,7 +347,11 @@ async def test_train_persist_load_with_only_intent_classification( create_diet: Callable[..., DIETClassifier], ): create_train_load_and_process_diet( - {ENTITY_RECOGNITION: False, INTENT_CLASSIFICATION: True, EPOCHS: 1,}, + { + ENTITY_RECOGNITION: False, + INTENT_CLASSIFICATION: True, + EPOCHS: 1, + }, ) create_diet({MASKED_LM: True, EPOCHS: 1}, load=True, finetune=True) @@ -686,12 +696,12 @@ async def test_adjusting_layers_incremental_training( train_load_and_process_diet: Callable[..., Message], ): """Tests adjusting sparse layers of `DIETClassifier` to increased sparse - feature sizes during incremental training. + feature sizes during incremental training. - Testing is done by checking the layer sizes. - Checking if they were replaced correctly is also important - and is done in `test_replace_dense_for_sparse_layers` - in `test_rasa_layers.py`. + Testing is done by checking the layer sizes. + Checking if they were replaced correctly is also important + and is done in `test_replace_dense_for_sparse_layers` + in `test_rasa_layers.py`. 
""" iter1_data_path = "data/test_incremental_training/iter1/" iter2_data_path = "data/test_incremental_training/" @@ -709,7 +719,9 @@ async def test_adjusting_layers_incremental_training( ] classifier = create_diet({EPOCHS: 1}) processed_message = train_load_and_process_diet( - classifier, pipeline=pipeline, training_data=iter1_data_path, + classifier, + pipeline=pipeline, + training_data=iter1_data_path, ) old_data_signature = classifier.model.data_signature @@ -739,7 +751,9 @@ async def test_adjusting_layers_incremental_training( finetune_classifier = create_diet({EPOCHS: 1}, load=True, finetune=True) assert finetune_classifier.finetune_mode processed_message_finetuned = train_load_and_process_diet( - finetune_classifier, pipeline=pipeline, training_data=iter2_data_path, + finetune_classifier, + pipeline=pipeline, + training_data=iter2_data_path, ) new_sparse_feature_sizes = processed_message_finetuned.get_sparse_feature_sizes( @@ -856,7 +870,9 @@ async def test_sparse_feature_sizes_decreased_incremental_training( classifier = create_diet({EPOCHS: 1}) assert not classifier.finetune_mode train_load_and_process_diet( - classifier, pipeline=pipeline, training_data=iter1_path, + classifier, + pipeline=pipeline, + training_data=iter1_path, ) finetune_classifier = create_diet({EPOCHS: 1}, load=True, finetune=True) @@ -865,10 +881,14 @@ async def test_sparse_feature_sizes_decreased_incremental_training( if should_raise_exception: with pytest.raises(Exception) as exec_info: train_load_and_process_diet( - finetune_classifier, pipeline=pipeline, training_data=iter2_path, + finetune_classifier, + pipeline=pipeline, + training_data=iter2_path, ) assert "Sparse feature sizes have decreased" in str(exec_info.value) else: train_load_and_process_diet( - finetune_classifier, pipeline=pipeline, training_data=iter2_path, + finetune_classifier, + pipeline=pipeline, + training_data=iter2_path, ) diff --git a/tests/nlu/featurizers/test_lm_featurizer.py b/tests/nlu/featurizers/test_lm_featurizer.py index fa430ae10b70..a6f33916cb6f 100644 --- a/tests/nlu/featurizers/test_lm_featurizer.py +++ b/tests/nlu/featurizers/test_lm_featurizer.py @@ -36,7 +36,10 @@ def create_language_model_featurizer( ) -> Callable[[Dict[Text, Any]], LanguageModelFeaturizer]: def inner(config: Dict[Text, Any]) -> LanguageModelFeaturizer: return LanguageModelFeaturizer.create( - config={**LanguageModelFeaturizer.get_default_config(), **config,}, + config={ + **LanguageModelFeaturizer.get_default_config(), + **config, + }, model_storage=default_model_storage, resource=resource_language_model_featurizer, execution_context=default_execution_context, @@ -90,7 +93,7 @@ def process_training_text( ], whitespace_tokenizer: WhitespaceTokenizer, ) -> List[Message]: - """ Creates a featurizer and process training data """ + """Creates a featurizer and process training data""" config = create_pretrained_transformers_config(model_name, model_weights) lm_featurizer = create_language_model_featurizer(config) @@ -111,7 +114,7 @@ def process_messages( ], whitespace_tokenizer: WhitespaceTokenizer, ) -> List[Message]: - """ Creates a featurizer and processes messages """ + """Creates a featurizer and processes messages""" config = create_pretrained_transformers_config(model_name, model_weights) lm_featurizer = create_language_model_featurizer(config) @@ -554,7 +557,7 @@ def check_subtokens( expected_number_of_sub_tokens: List[List[float]], whitespace_tokenizer: WhitespaceTokenizer, ): - """ Checks that we get the correct number of sub tokens """ + 
"""Checks that we get the correct number of sub tokens""" for index, message in enumerate(messages): assert [ t.get(NUMBER_OF_SUB_TOKENS) for t in message.get(TOKENS_NAMES[TEXT]) @@ -576,7 +579,7 @@ def test_lm_featurizer_num_sub_tokens_process_training_data( whitespace_tokenizer: WhitespaceTokenizer, ): """Tests the number of sub tokens when calling the function - process training data """ + process training data""" messages = process_training_text( texts, model_name, @@ -601,7 +604,7 @@ def test_lm_featurizer_num_sub_tokens_process_messages( whitespace_tokenizer: WhitespaceTokenizer, ): """Tests the number of sub tokens when calling the function - process (messages) """ + process (messages)""" messages = process_messages( texts, model_name, @@ -628,7 +631,9 @@ def test_sequence_length_overflow_train( monkeypatch: MonkeyPatch, ): monkeypatch.setattr( - LanguageModelFeaturizer, "_load_model_instance", lambda _: None, + LanguageModelFeaturizer, + "_load_model_instance", + lambda _: None, ) component = create_language_model_featurizer({"model_name": model_name}) message = Message.build(text=" ".join(["hi"] * input_sequence_length)) @@ -662,7 +667,9 @@ def test_long_sequences_extra_padding( monkeypatch: MonkeyPatch, ): monkeypatch.setattr( - LanguageModelFeaturizer, "_load_model_instance", lambda _: None, + LanguageModelFeaturizer, + "_load_model_instance", + lambda _: None, ) component = create_language_model_featurizer({"model_name": model_name}) modified_sequence_embeddings = component._add_extra_padding( @@ -701,7 +708,9 @@ def test_input_padding( monkeypatch: MonkeyPatch, ): monkeypatch.setattr( - LanguageModelFeaturizer, "_load_model_instance", lambda _: None, + LanguageModelFeaturizer, + "_load_model_instance", + lambda _: None, ) component = create_language_model_featurizer({"model_name": "bert"}) component.pad_token_id = 0 @@ -759,7 +768,9 @@ def test_attention_mask( monkeypatch: MonkeyPatch, ): monkeypatch.setattr( - LanguageModelFeaturizer, "_load_model_instance", lambda _: None, + LanguageModelFeaturizer, + "_load_model_instance", + lambda _: None, ) component = create_language_model_featurizer({"model_name": "bert"}) diff --git a/tests/nlu/selectors/test_selectors.py b/tests/nlu/selectors/test_selectors.py index d08f5b2054e6..676773e3547e 100644 --- a/tests/nlu/selectors/test_selectors.py +++ b/tests/nlu/selectors/test_selectors.py @@ -150,7 +150,12 @@ def inner( "config_params", [ {EPOCHS: 1}, - {EPOCHS: 1, MASKED_LM: True, TRANSFORMER_SIZE: 256, NUM_TRANSFORMER_LAYERS: 1,}, + { + EPOCHS: 1, + MASKED_LM: True, + TRANSFORMER_SIZE: 256, + NUM_TRANSFORMER_LAYERS: 1, + }, ], ) def test_train_selector( @@ -269,7 +274,9 @@ def test_resolve_intent_response_key_from_label( create_response_selector: Callable[[Dict[Text, Any]], ResponseSelector], ): - response_selector = create_response_selector({"use_text_as_label": train_on_text},) + response_selector = create_response_selector( + {"use_text_as_label": train_on_text}, + ) response_selector.preprocess_train_data(response_selector_training_data) label_intent_response_key = response_selector._resolve_intent_response_key( @@ -348,11 +355,15 @@ def test_train_persist_load( config_params = {EPOCHS: 1} train_persist_load_with_different_settings( - pipeline, config_params, False, + pipeline, + config_params, + False, ) train_persist_load_with_different_settings( - pipeline, config_params, True, + pipeline, + config_params, + True, ) @@ -406,7 +417,8 @@ async def test_process_gives_diagnostic_data( @pytest.mark.parametrize( - 
"classifier_params", [({LOSS_TYPE: "margin", RANDOM_SEED: 42, EPOCHS: 1})], + "classifier_params", + [({LOSS_TYPE: "margin", RANDOM_SEED: 42, EPOCHS: 1})], ) async def test_margin_loss_is_not_normalized( classifier_params: Dict[Text, int], @@ -599,7 +611,9 @@ def test_sets_integer_transformer_size_when_needed( ) -def test_transformer_size_gets_corrected(train_persist_load_with_different_settings,): +def test_transformer_size_gets_corrected( + train_persist_load_with_different_settings, +): """Tests that the default value of `transformer_size` which is `None` is corrected if transformer layers are enabled in `ResponseSelector`. """ @@ -610,7 +624,9 @@ def test_transformer_size_gets_corrected(train_persist_load_with_different_setti config_params = {EPOCHS: 1, NUM_TRANSFORMER_LAYERS: 1} selector = train_persist_load_with_different_settings( - pipeline, config_params, False, + pipeline, + config_params, + False, ) assert selector.component_config[TRANSFORMER_SIZE] == DEFAULT_TRANSFORMER_SIZE @@ -623,12 +639,12 @@ async def test_adjusting_layers_incremental_training( process_message: Callable[..., Message], ): """Tests adjusting sparse layers of `ResponseSelector` to increased sparse - feature sizes during incremental training. + feature sizes during incremental training. - Testing is done by checking the layer sizes. - Checking if they were replaced correctly is also important - and is done in `test_replace_dense_for_sparse_layers` - in `test_rasa_layers.py`. + Testing is done by checking the layer sizes. + Checking if they were replaced correctly is also important + and is done in `test_replace_dense_for_sparse_layers` + in `test_rasa_layers.py`. """ iter1_data_path = "data/test_incremental_training/iter1/" iter2_data_path = "data/test_incremental_training/" diff --git a/tests/nlu/test_train.py b/tests/nlu/test_train.py index ecc544a0354e..15a6df0af1dc 100644 --- a/tests/nlu/test_train.py +++ b/tests/nlu/test_train.py @@ -76,7 +76,9 @@ def pipelines_for_tests() -> List[Tuple[Text, List[Dict[Text, Any]]]]: ( "en", as_pipeline( - "WhitespaceTokenizer", "LanguageModelFeaturizer", "DIETClassifier", + "WhitespaceTokenizer", + "LanguageModelFeaturizer", + "DIETClassifier", ), ), ("fallback", as_pipeline("KeywordIntentClassifier", "FallbackClassifier")), @@ -160,7 +162,9 @@ async def test_train_persist_load_parse( ) persisted_path = rasa.model_training.train_nlu( - str(config_file), nlu_as_json_path, output=str(tmp_path), + str(config_file), + nlu_as_json_path, + output=str(tmp_path), ) assert Path(persisted_path).is_file() @@ -182,11 +186,18 @@ def test_train_persist_load_parse_non_windows( def test_train_model_empty_pipeline(nlu_as_json_path: Text, tmp_path: Path): config_file = tmp_path / "config.yml" - rasa.shared.utils.io.dump_obj_as_json_to_file(config_file, {"pipeline": [],}) + rasa.shared.utils.io.dump_obj_as_json_to_file( + config_file, + { + "pipeline": [], + }, + ) with pytest.raises(ValueError): rasa.model_training.train_nlu( - str(config_file), nlu_as_json_path, output=str(tmp_path), + str(config_file), + nlu_as_json_path, + output=str(tmp_path), ) @@ -203,10 +214,13 @@ def test_handles_pipeline_with_non_existing_component( ) with pytest.raises( - Exception, match="Can't load class for name 'my_made_up_component'", + Exception, + match="Can't load class for name 'my_made_up_component'", ): rasa.model_training.train_nlu( - str(config_file), nlu_as_json_path, output=str(tmp_path), + str(config_file), + nlu_as_json_path, + output=str(tmp_path), ) diff --git 
a/tests/utils/tensorflow/test_models.py b/tests/utils/tensorflow/test_models.py index 33ef135d013c..59f4903ed283 100644 --- a/tests/utils/tensorflow/test_models.py +++ b/tests/utils/tensorflow/test_models.py @@ -69,7 +69,11 @@ def test_equal_dicts( @pytest.mark.parametrize( "batch_size, number_of_data_points, expected_number_of_batch_iterations", - [(2, 3, 2), (1, 3, 3), (5, 3, 1),], + [ + (2, 3, 2), + (1, 3, 3), + (5, 3, 1), + ], ) def test_batch_inference( batch_size: int, @@ -204,7 +208,7 @@ def test_raise_exception_decreased_sparse_feature_sizes( raise_exception: bool, ): """Tests if exception is raised when sparse feature sizes decrease - during incremental training.""" + during incremental training.""" if raise_exception: with pytest.raises(Exception) as exec_info: TransformerRasaModel._check_if_sparse_feature_sizes_decreased( diff --git a/tests/utils/tensorflow/test_rasa_layers.py b/tests/utils/tensorflow/test_rasa_layers.py index 6b999322a5a2..6f456567a78a 100644 --- a/tests/utils/tensorflow/test_rasa_layers.py +++ b/tests/utils/tensorflow/test_rasa_layers.py @@ -88,7 +88,10 @@ }, ) -model_config_transformer_mlm = dict(model_config_transformer, **{MASKED_LM: True},) +model_config_transformer_mlm = dict( + model_config_transformer, + **{MASKED_LM: True}, +) # Dummy feature signatures and features (full of 1s) for tests that don't check exact @@ -326,7 +329,11 @@ def test_layer_gives_correct_output_units( SENTENCE: [feature_signature_dense_1], } }, - ([feature_dense_seq_1], [feature_dense_sent_1], sequence_lengths,), + ( + [feature_dense_seq_1], + [feature_dense_sent_1], + sequence_lengths, + ), [ [batch_size, max_seq_length + 1, units_1], [batch_size, max_seq_length + 1, 1], @@ -385,8 +392,12 @@ def test_layer_gives_correct_output_units( [batch_size, max_seq_length + 1, units_transformer], [batch_size, max_seq_length + 1, units_hidden_layer], [batch_size, max_seq_length + 1, 1], - [0,], - [0,], + [ + 0, + ], + [ + 0, + ], [ batch_size, num_transformer_layers, @@ -406,9 +417,15 @@ def test_layer_gives_correct_output_units( [batch_size, max_seq_length + 1, units_hidden_layer], [batch_size, max_seq_length + 1, units_hidden_layer], [batch_size, max_seq_length + 1, 1], - [0,], - [0,], - [0,], + [ + 0, + ], + [ + 0, + ], + [ + 0, + ], ], "same_as_train", ), @@ -422,9 +439,15 @@ def test_layer_gives_correct_output_units( [batch_size, max_seq_length + 1, units_concat], [batch_size, max_seq_length + 1, units_concat], [batch_size, max_seq_length + 1, 1], - [0,], - [0,], - [0,], + [ + 0, + ], + [ + 0, + ], + [ + 0, + ], ], "same_as_train", ), @@ -439,7 +462,11 @@ def test_layer_gives_correct_output_units( SENTENCE: [], } }, - ([feature_sparse_seq_1], [], sequence_lengths,), + ( + [feature_sparse_seq_1], + [], + sequence_lengths, + ), [ [batch_size, max_seq_length, units_transformer], [batch_size, max_seq_length, units_hidden_layer], @@ -458,8 +485,12 @@ def test_layer_gives_correct_output_units( [batch_size, max_seq_length, units_transformer], [batch_size, max_seq_length, units_hidden_layer], [batch_size, max_seq_length, 1], - [0,], - [0,], + [ + 0, + ], + [ + 0, + ], [ batch_size, num_transformer_layers, @@ -479,7 +510,11 @@ def test_correct_output_shape( expected_output_shapes_train: List[List[int]], expected_output_shapes_test: Union[Text, List[List[int]]], ) -> None: - layer = layer_class(**layer_args, attribute=attribute_name, config=model_config,) + layer = layer_class( + **layer_args, + attribute=attribute_name, + config=model_config, + ) train_outputs = layer(layer_inputs, 
training=True) if not isinstance(train_outputs, tuple): @@ -547,7 +582,11 @@ def test_concat_sparse_dense_raises_exception_when_inconsistent_sparse_features( is_sparse=False, units=1, number_of_dimensions=3 ) realistic_feature_dense_seq_1 = tf.convert_to_tensor( - [[[10.0], [20.0], [30.0]], [[40.0], [50.0], [0.0]],], dtype=tf.float32 + [ + [[10.0], [20.0], [30.0]], + [[40.0], [50.0], [0.0]], + ], + dtype=tf.float32, ) realistic_feature_signature_dense_2 = FeatureSignature( @@ -562,7 +601,8 @@ def test_concat_sparse_dense_raises_exception_when_inconsistent_sparse_features( is_sparse=False, units=3, number_of_dimensions=3 ) realistic_feature_dense_sent_3 = tf.convert_to_tensor( - [[[0.1, 0.2, 0.3]], [[0.4, 0.5, 0.6]]], dtype=tf.float32, + [[[0.1, 0.2, 0.3]], [[0.4, 0.5, 0.6]]], + dtype=tf.float32, ) realistic_sequence_lengths = tf.convert_to_tensor([3, 2], dtype=tf.int32) @@ -750,7 +790,9 @@ def test_feature_combining_correct_output( realistic_feature_signature_dense_1, realistic_feature_signature_dense_2, ], - SENTENCE: [realistic_feature_signature_dense_3,], + SENTENCE: [ + realistic_feature_signature_dense_3, + ], }, ( [realistic_feature_dense_seq_1, realistic_feature_dense_seq_2], @@ -798,8 +840,16 @@ def test_feature_combining_correct_output( ( np.array( [ - [[10.0, 1.0, 2.0], [20.0, 3.0, 4.0], [30.0, 5.0, 6.0],], - [[40.0, 1.5, 2.5], [50.0, 3.5, 4.5], [0.0, 0.0, 0.0],], + [ + [10.0, 1.0, 2.0], + [20.0, 3.0, 4.0], + [30.0, 5.0, 6.0], + ], + [ + [40.0, 1.5, 2.5], + [50.0, 3.5, 4.5], + [0.0, 0.0, 0.0], + ], ], dtype=np.float32, ), @@ -827,9 +877,14 @@ def test_sequence_layer_correct_output( mask_seq_sent_expected, token_ids_expected, ) = expected_outputs_train - (_, seq_sent_features, mask_seq_sent, token_ids, mlm_boolean_mask, _,) = layer( - inputs, training=True - ) + ( + _, + seq_sent_features, + mask_seq_sent, + token_ids, + mlm_boolean_mask, + _, + ) = layer(inputs, training=True) assert (seq_sent_features.numpy() == seq_sent_features_expected).all() assert (mask_seq_sent.numpy() == mask_seq_sent_expected).all() assert (token_ids.numpy() == token_ids_expected).all() @@ -842,7 +897,11 @@ def test_sequence_layer_correct_output( assert not mlm_boolean_mask.numpy()[0][realistic_sequence_lengths.numpy()][0] # Test-time check - (seq_sent_features_expected, mask_seq_sent_expected, _,) = expected_outputs_train + ( + seq_sent_features_expected, + mask_seq_sent_expected, + _, + ) = expected_outputs_train ( transformer_outputs, seq_sent_features, From 0b5ad7130712bc8801056ab058fc1b4d4a707919 Mon Sep 17 00:00:00 2001 From: carlad Date: Wed, 12 Jan 2022 13:12:27 +0100 Subject: [PATCH 4/9] black reformatting --- rasa/cli/arguments/evaluate.py | 11 +- rasa/cli/arguments/run.py | 4 +- rasa/cli/data.py | 4 +- rasa/cli/evaluate.py | 7 +- rasa/cli/telemetry.py | 4 +- rasa/cli/test.py | 6 +- rasa/cli/x.py | 8 +- rasa/core/actions/action.py | 5 +- rasa/core/actions/forms.py | 43 ++-- rasa/core/agent.py | 12 +- rasa/core/brokers/kafka.py | 3 +- rasa/core/channels/socketio.py | 4 +- rasa/core/channels/twilio_voice.py | 9 +- rasa/core/evaluation/marker_base.py | 17 +- rasa/core/featurizers/precomputation.py | 4 +- .../featurizers/single_state_featurizer.py | 12 +- rasa/core/featurizers/tracker_featurizers.py | 8 +- rasa/core/http_interpreter.py | 5 +- rasa/core/migrate.py | 9 +- rasa/core/nlg/response.py | 4 +- rasa/core/policies/ensemble.py | 4 +- rasa/core/policies/memoization.py | 12 +- rasa/core/policies/policy.py | 6 +- rasa/core/policies/rule_policy.py | 7 +- rasa/core/policies/ted_policy.py | 35 ++- 
.../core/policies/unexpected_intent_policy.py | 32 ++- rasa/core/processor.py | 19 +- rasa/core/run.py | 13 +- rasa/core/test.py | 9 +- rasa/core/tracker_store.py | 8 +- rasa/core/training/__init__.py | 8 +- rasa/core/training/interactive.py | 9 +- rasa/core/training/story_conflict.py | 8 +- rasa/core/utils.py | 3 +- rasa/engine/graph.py | 3 +- rasa/engine/recipes/default_recipe.py | 44 +++- rasa/engine/runner/dask.py | 5 +- rasa/engine/storage/local_model_storage.py | 15 +- rasa/engine/storage/storage.py | 4 +- rasa/engine/training/components.py | 7 +- rasa/engine/training/graph_trainer.py | 4 +- rasa/engine/validation.py | 28 +- .../providers/nlu_training_data_provider.py | 10 +- .../providers/story_graph_provider.py | 5 +- .../validators/default_recipe_validator.py | 10 +- .../validators/finetuning_validator.py | 14 +- rasa/model_training.py | 6 +- rasa/nlu/classifiers/diet_classifier.py | 8 +- rasa/nlu/classifiers/fallback_classifier.py | 5 +- .../classifiers/keyword_intent_classifier.py | 11 +- .../classifiers/sklearn_intent_classifier.py | 8 +- rasa/nlu/extractors/entity_synonyms.py | 5 +- rasa/nlu/extractors/mitie_entity_extractor.py | 13 +- rasa/nlu/extractors/regex_entity_extractor.py | 6 +- .../dense_featurizer/convert_featurizer.py | 5 +- .../dense_featurizer/lm_featurizer.py | 9 +- .../dense_featurizer/mitie_featurizer.py | 4 +- .../dense_featurizer/spacy_featurizer.py | 6 +- rasa/nlu/featurizers/featurizer.py | 7 +- .../count_vectors_featurizer.py | 4 +- .../lexical_syntactic_featurizer.py | 6 +- .../sparse_featurizer/regex_featurizer.py | 5 +- rasa/nlu/selectors/response_selector.py | 3 +- rasa/nlu/test.py | 13 +- rasa/nlu/tokenizers/jieba_tokenizer.py | 5 +- rasa/server.py | 3 +- rasa/shared/core/domain.py | 13 +- rasa/shared/core/events.py | 10 +- rasa/shared/core/slot_mappings.py | 11 +- rasa/shared/core/training_data/loading.py | 22 +- .../story_reader/story_reader.py | 4 +- .../story_reader/yaml_story_reader.py | 5 +- .../core/training_data/visualization.py | 4 +- rasa/shared/data.py | 4 +- rasa/shared/importers/importer.py | 25 +- rasa/shared/importers/multi_project.py | 9 +- rasa/shared/importers/rasa.py | 12 +- rasa/shared/importers/utils.py | 4 +- rasa/shared/nlu/training_data/features.py | 13 +- .../nlu/training_data/formats/dialogflow.py | 14 +- rasa/shared/utils/schemas/events.py | 5 +- rasa/telemetry.py | 11 +- rasa/utils/common.py | 8 +- rasa/utils/plotting.py | 5 +- rasa/utils/tensorflow/layers.py | 8 +- rasa/utils/tensorflow/layers_utils.py | 3 +- rasa/utils/tensorflow/models.py | 17 +- rasa/utils/tensorflow/rasa_layers.py | 5 +- rasa/utils/train_utils.py | 3 +- rasa/validator.py | 4 +- tests/cli/test_rasa_evaluate_markers.py | 4 +- tests/cli/test_rasa_init.py | 7 +- tests/cli/test_rasa_test.py | 12 +- tests/cli/test_rasa_train.py | 3 +- tests/cli/test_rasa_x.py | 3 +- tests/conftest.py | 16 +- tests/core/actions/test_forms.py | 51 +++- tests/core/conftest.py | 5 +- tests/core/featurizers/test_precomputation.py | 6 +- .../featurizers/test_tracker_featurizer.py | 243 +++++++++++++----- tests/core/policies/test_rule_policy.py | 37 ++- tests/core/policies/test_ted_policy.py | 52 +++- .../policies/test_unexpected_intent_policy.py | 37 ++- tests/core/test_actions.py | 109 ++++++-- tests/core/test_agent.py | 15 +- tests/core/test_ensemble.py | 10 +- tests/core/test_evaluation.py | 40 ++- tests/core/test_migrate.py | 8 +- tests/core/test_policies.py | 41 ++- tests/core/test_test.py | 13 +- tests/core/test_tracker_stores.py | 19 +- tests/core/test_training.py | 5 +- 
tests/core/training/test_interactive.py | 16 +- tests/core/training/test_story_conflict.py | 15 +- tests/dialogues.py | 23 +- tests/docs/test_docs_use_base_url.py | 3 +- tests/engine/recipes/test_default_recipe.py | 42 ++- .../storage/test_local_model_storage.py | 14 +- tests/engine/test_caching.py | 4 +- tests/engine/test_loader.py | 7 +- tests/engine/test_validation.py | 124 ++++++--- tests/engine/training/test_fingerprinting.py | 24 +- tests/engine/training/test_graph_trainer.py | 16 +- tests/engine/training/test_hooks.py | 6 +- .../converters/test_nlu_message_converter.py | 14 +- .../providers/test_story_graph_provider.py | 7 +- .../validators/test_finetuning_validator.py | 30 ++- .../classifiers/test_fallback_classifier.py | 3 +- .../classifiers/test_keyword_classifier.py | 25 +- .../classifiers/test_sklearn_classifier.py | 8 +- tests/nlu/conftest.py | 15 +- .../test_duckling_entity_extractor.py | 3 +- tests/nlu/extractors/test_extractor.py | 4 +- .../extractors/test_mitie_entity_extractor.py | 5 +- .../extractors/test_regex_entity_extractor.py | 17 +- .../featurizers/test_convert_featurizer.py | 23 +- .../test_count_vectors_featurizer.py | 36 ++- tests/nlu/featurizers/test_featurizer.py | 14 +- .../test_lexical_syntactic_featurizer.py | 30 ++- .../nlu/featurizers/test_mitie_featurizer.py | 5 +- .../nlu/featurizers/test_regex_featurizer.py | 36 ++- .../nlu/featurizers/test_spacy_featurizer.py | 3 +- tests/nlu/test_evaluation.py | 21 +- tests/nlu/test_spacy_utils.py | 3 +- tests/nlu/tokenizers/test_jieba_tokenizer.py | 5 +- tests/nlu/tokenizers/test_tokenizer.py | 5 +- .../tokenizers/test_whitespace_tokenizer.py | 8 +- tests/nlu/utils/test_mitie_utils.py | 3 +- tests/shared/core/test_constants.py | 9 +- tests/shared/core/test_domain.py | 17 +- tests/shared/core/test_events.py | 93 +++++-- tests/shared/core/test_slot_mappings.py | 5 +- tests/shared/core/test_slots.py | 7 +- .../story_reader/test_yaml_story_reader.py | 8 +- .../core/training_data/test_visualization.py | 6 +- tests/shared/importers/test_multi_project.py | 8 +- .../shared/nlu/training_data/test_message.py | 10 +- tests/shared/test_data.py | 7 +- tests/shared/utils/schemas/test_events.py | 8 +- tests/test_memory_leak.py | 4 +- tests/test_model_training.py | 45 +++- tests/test_server.py | 28 +- tests/test_validator.py | 28 +- tests/train.py | 4 +- tests/utils/tensorflow/test_callback.py | 3 +- tests/utils/tensorflow/test_layers.py | 30 ++- .../utils/tensorflow/test_model_data_utils.py | 4 +- tests/utils/test_common.py | 5 +- tests/utils/test_endpoints.py | 21 +- tests/utils/test_io.py | 8 +- tests/utils/test_plotting.py | 18 +- tests/utils/test_train_utils.py | 6 +- 172 files changed, 1975 insertions(+), 612 deletions(-) diff --git a/rasa/cli/arguments/evaluate.py b/rasa/cli/arguments/evaluate.py index 11833e0c4acc..0057230e354a 100644 --- a/rasa/cli/arguments/evaluate.py +++ b/rasa/cli/arguments/evaluate.py @@ -42,7 +42,8 @@ def set_markers_arguments(parser: argparse.ArgumentParser) -> None: ) add_endpoint_param( - parser, help_text="Configuration file for the tracker store as a yml file.", + parser, + help_text="Configuration file for the tracker store as a yml file.", ) add_domain_param(parser) @@ -51,7 +52,9 @@ def set_markers_arguments(parser: argparse.ArgumentParser) -> None: def set_markers_first_n_arguments(parser: argparse.ArgumentParser) -> None: """Specifies arguments for `rasa evaluate markers first_n`.""" parser.add_argument( - "count", type=int, help="The number of trackers to extract markers from", + "count", 
+ type=int, + help="The number of trackers to extract markers from", ) @@ -61,5 +64,7 @@ def set_markers_sample_arguments(parser: argparse.ArgumentParser) -> None: "--seed", type=int, help="Seed to use if selecting trackers by 'sample_n'" ) parser.add_argument( - "count", type=int, help="The number of trackers to extract markers from", + "count", + type=int, + help="The number of trackers to extract markers from", ) diff --git a/rasa/cli/arguments/run.py b/rasa/cli/arguments/run.py index 14bfcdc7ac91..f02152f1a4fe 100644 --- a/rasa/cli/arguments/run.py +++ b/rasa/cli/arguments/run.py @@ -56,7 +56,9 @@ def add_server_arguments(parser: argparse.ArgumentParser) -> None: help="Store logs in specified file.", ) parser.add_argument( - "--use-syslog", action="store_true", help="Add syslog as a log handler", + "--use-syslog", + action="store_true", + help="Add syslog as a log handler", ) parser.add_argument( "--syslog-address", diff --git a/rasa/cli/data.py b/rasa/cli/data.py index 1e89d8175d40..258148b12029 100644 --- a/rasa/cli/data.py +++ b/rasa/cli/data.py @@ -167,7 +167,9 @@ def validate_files(args: argparse.Namespace, stories_only: bool = False) -> None ) file_importer = RasaFileImporter( - domain_path=args.domain, training_data_paths=args.data, config_file=config, + domain_path=args.domain, + training_data_paths=args.data, + config_file=config, ) validator = Validator.from_importer(file_importer) diff --git a/rasa/cli/evaluate.py b/rasa/cli/evaluate.py index afe63706002e..9e409160e478 100644 --- a/rasa/cli/evaluate.py +++ b/rasa/cli/evaluate.py @@ -211,4 +211,9 @@ def _create_tracker_loader( """ endpoints = AvailableEndpoints.read_endpoints(endpoint_config) tracker_store = TrackerStore.create(endpoints.tracker_store, domain=domain) - return MarkerTrackerLoader(tracker_store, strategy, count, seed,) + return MarkerTrackerLoader( + tracker_store, + strategy, + count, + seed, + ) diff --git a/rasa/cli/telemetry.py b/rasa/cli/telemetry.py index 139b5b09356a..a4031e4d9348 100644 --- a/rasa/cli/telemetry.py +++ b/rasa/cli/telemetry.py @@ -10,7 +10,8 @@ def add_subparser( - subparsers: SubParsersAction, parents: List[argparse.ArgumentParser], + subparsers: SubParsersAction, + parents: List[argparse.ArgumentParser], ) -> None: """Add all telemetry tracking parsers. 
@@ -18,7 +19,6 @@ def add_subparser( subparsers: subparser we are going to attach to parents: Parent parsers, needed to ensure tree structure in argparse """ - telemetry_parser = subparsers.add_parser( "telemetry", parents=parents, diff --git a/rasa/cli/test.py b/rasa/cli/test.py index 0e326b5d1fab..6695ed7a1017 100644 --- a/rasa/cli/test.py +++ b/rasa/cli/test.py @@ -183,7 +183,8 @@ async def run_nlu_test_async( data_path = rasa.cli.utils.get_validated_path(data_path, "nlu", DEFAULT_DATA_PATH) test_data_importer = TrainingDataImporter.load_from_dict( - training_data_paths=[data_path], domain_path=DEFAULT_DOMAIN_PATH, + training_data_paths=[data_path], + domain_path=DEFAULT_DOMAIN_PATH, ) nlu_data = test_data_importer.get_nlu_data() @@ -205,7 +206,8 @@ async def run_nlu_test_async( for file in config: try: validation_utils.validate_yaml_schema( - rasa.shared.utils.io.read_file(file), CONFIG_SCHEMA_FILE, + rasa.shared.utils.io.read_file(file), + CONFIG_SCHEMA_FILE, ) config_files.append(file) except YamlException: diff --git a/rasa/cli/x.py b/rasa/cli/x.py index 30590d7da58c..59fb9a8c9176 100644 --- a/rasa/cli/x.py +++ b/rasa/cli/x.py @@ -423,7 +423,9 @@ def run_in_production(args: argparse.Namespace) -> None: _rasa_service(args, endpoints, None, credentials_path) -def _get_config_path(args: argparse.Namespace,) -> Optional[Text]: +def _get_config_path( + args: argparse.Namespace, +) -> Optional[Text]: config_path = rasa.cli.utils.get_validated_path( args.config, "config", DEFAULT_CONFIG_PATH ) @@ -431,7 +433,9 @@ def _get_config_path(args: argparse.Namespace,) -> Optional[Text]: return config_path -def _get_domain_path(args: argparse.Namespace,) -> Optional[Text]: +def _get_domain_path( + args: argparse.Namespace, +) -> Optional[Text]: domain_path = rasa.cli.utils.get_validated_path( args.domain, "domain", DEFAULT_DOMAIN_PATH ) diff --git a/rasa/core/actions/action.py b/rasa/core/actions/action.py index 3250be71d3ce..2bd0b8e7be37 100644 --- a/rasa/core/actions/action.py +++ b/rasa/core/actions/action.py @@ -1240,7 +1240,10 @@ def extract_slot_value_from_predefined_mapping( """Extracts slot value if slot has an applicable predefined mapping.""" should_fill_entity_slot = ( mapping_type == SlotMappingType.FROM_ENTITY - and SlotMapping.entity_is_desired(mapping, tracker,) + and SlotMapping.entity_is_desired( + mapping, + tracker, + ) ) should_fill_intent_slot = mapping_type == SlotMappingType.FROM_INTENT diff --git a/rasa/core/actions/forms.py b/rasa/core/actions/forms.py index 09761483eb3f..92638ed8af19 100644 --- a/rasa/core/actions/forms.py +++ b/rasa/core/actions/forms.py @@ -344,7 +344,9 @@ def _update_slot_values( return slot_values def _add_dynamic_slots_requested_by_dynamic_forms( - self, tracker: "DialogueStateTracker", domain: Domain, + self, + tracker: "DialogueStateTracker", + domain: Domain, ) -> Set[Text]: required_slots = set(self.required_slots(domain)) requested_slot = self.get_slot_to_fill(tracker) @@ -355,7 +357,9 @@ def _add_dynamic_slots_requested_by_dynamic_forms( return required_slots def _get_slot_extractions( - self, tracker: "DialogueStateTracker", domain: Domain, + self, + tracker: "DialogueStateTracker", + domain: Domain, ) -> Dict[Text, Any]: events_since_last_user_uttered = FormAction._get_events_since_last_user_uttered( tracker @@ -631,22 +635,25 @@ async def is_done( # We explicitly check only the last occurrences for each possible termination # event instead of doing `return event in events_so_far` to make it possible # to override termination events which were 
returned earlier. - return next( - ( - event - for event in reversed(events_so_far) - if isinstance(event, SlotSet) and event.key == REQUESTED_SLOT - ), - None, - ) == SlotSet(REQUESTED_SLOT, None) or next( - ( - event - for event in reversed(events_so_far) - if isinstance(event, ActiveLoop) - ), - None, - ) == ActiveLoop( - None + return ( + next( + ( + event + for event in reversed(events_so_far) + if isinstance(event, SlotSet) and event.key == REQUESTED_SLOT + ), + None, + ) + == SlotSet(REQUESTED_SLOT, None) + or next( + ( + event + for event in reversed(events_so_far) + if isinstance(event, ActiveLoop) + ), + None, + ) + == ActiveLoop(None) ) async def deactivate(self, *args: Any, **kwargs: Any) -> List[Event]: diff --git a/rasa/core/agent.py b/rasa/core/agent.py index eac8f7cbf204..3a604dd7778b 100644 --- a/rasa/core/agent.py +++ b/rasa/core/agent.py @@ -357,7 +357,9 @@ def load( return agent def load_model( - self, model_path: Union[Text, Path], fingerprint: Optional[Text] = None, + self, + model_path: Union[Text, Path], + fingerprint: Optional[Text] = None, ) -> None: """Loads the agent's model and processor given a new model path.""" self.processor = MessageProcessor( @@ -419,7 +421,8 @@ async def parse_message(self, message_data: Text) -> Dict[Text, Any]: return await self.processor.parse_message(message) async def handle_message( - self, message: UserMessage, + self, + message: UserMessage, ) -> Optional[List[Dict[Text, Any]]]: """Handle a single message.""" if not self.is_ready(): @@ -446,7 +449,10 @@ def predict_next_with_tracker( return self.processor.predict_next_with_tracker(tracker, verbosity) @agent_must_be_ready - async def log_message(self, message: UserMessage,) -> DialogueStateTracker: + async def log_message( + self, + message: UserMessage, + ) -> DialogueStateTracker: """Append a message to a dialogue - does not predict actions.""" return await self.processor.log_message(message) diff --git a/rasa/core/brokers/kafka.py b/rasa/core/brokers/kafka.py index f0b33dd545aa..8f85639c915c 100644 --- a/rasa/core/brokers/kafka.py +++ b/rasa/core/brokers/kafka.py @@ -142,7 +142,8 @@ def _create_producer(self) -> None: if self.security_protocol == "PLAINTEXT": authentication_params = dict( - security_protocol=self.security_protocol, ssl_check_hostname=False, + security_protocol=self.security_protocol, + ssl_check_hostname=False, ) elif self.security_protocol == "SASL_PLAINTEXT": authentication_params = dict( diff --git a/rasa/core/channels/socketio.py b/rasa/core/channels/socketio.py index 52fdf8c0289f..65e80b0bda0c 100644 --- a/rasa/core/channels/socketio.py +++ b/rasa/core/channels/socketio.py @@ -197,7 +197,9 @@ async def connect(sid: Text, environ: Dict, auth: Optional[Dict]) -> bool: jwt_payload = None if auth and auth.get("token"): jwt_payload = rasa.core.channels.channel.decode_bearer_token( - auth.get("token"), self.jwt_key, self.jwt_algorithm, + auth.get("token"), + self.jwt_key, + self.jwt_algorithm, ) if jwt_payload: diff --git a/rasa/core/channels/twilio_voice.py b/rasa/core/channels/twilio_voice.py index 2f83f3194bd5..a59679d8bf66 100644 --- a/rasa/core/channels/twilio_voice.py +++ b/rasa/core/channels/twilio_voice.py @@ -240,7 +240,10 @@ async def receive(request: Request) -> HTTPResponse: if text is not None: await on_new_message( UserMessage( - text, collector, sender_id, input_channel=input_channel, + text, + collector, + sender_id, + input_channel=input_channel, ) ) @@ -313,7 +316,9 @@ def name(cls) -> Text: return "twilio_voice" @staticmethod - def 
_emoji_warning(text: Text,) -> None: + def _emoji_warning( + text: Text, + ) -> None: """Raises a warning if text contains an emoji.""" emoji_regex = rasa.utils.io.get_emoji_regex() if emoji_regex.findall(text): diff --git a/rasa/core/evaluation/marker_base.py b/rasa/core/evaluation/marker_base.py index 76ad1ffe747f..5d72a390ce5d 100644 --- a/rasa/core/evaluation/marker_base.py +++ b/rasa/core/evaluation/marker_base.py @@ -567,11 +567,15 @@ def from_config(config: Any, name: Optional[Text] = None) -> Marker: tag, _ = MarkerRegistry.get_non_negated_tag(tag_or_negated_tag=tag) if tag in MarkerRegistry.operator_tag_to_marker_class: marker = OperatorMarker.from_tag_and_sub_config( - tag=tag, sub_config=sub_marker_config, name=name, + tag=tag, + sub_config=sub_marker_config, + name=name, ) elif tag in MarkerRegistry.condition_tag_to_marker_class: marker = ConditionMarker.from_tag_and_sub_config( - tag=tag, sub_config=sub_marker_config, name=name, + tag=tag, + sub_config=sub_marker_config, + name=name, ) else: raise InvalidMarkerConfig( @@ -671,7 +675,10 @@ def _save_results(path: Path, results: Dict[Text, List[SessionEvaluation]]) -> N @staticmethod def _write_relevant_events( - writer: WriteRow, sender_id: Text, session_idx: int, session: SessionEvaluation, + writer: WriteRow, + sender_id: Text, + session_idx: int, + session: SessionEvaluation, ) -> None: for marker_name, meta_data_per_relevant_event in session.items(): for event_meta_data in meta_data_per_relevant_event: @@ -775,7 +782,9 @@ def max_depth(self) -> int: @staticmethod def from_tag_and_sub_config( - tag: Text, sub_config: Any, name: Optional[Text] = None, + tag: Text, + sub_config: Any, + name: Optional[Text] = None, ) -> OperatorMarker: """Creates an operator marker from the given config. diff --git a/rasa/core/featurizers/precomputation.py b/rasa/core/featurizers/precomputation.py index 474d9e01a5a1..f3821bea4268 100644 --- a/rasa/core/featurizers/precomputation.py +++ b/rasa/core/featurizers/precomputation.py @@ -324,7 +324,9 @@ def create( return cls() def convert_for_training( - self, domain: Domain, story_graph: StoryGraph, + self, + domain: Domain, + story_graph: StoryGraph, ) -> TrainingData: """Creates de-duplicated training data. diff --git a/rasa/core/featurizers/single_state_featurizer.py b/rasa/core/featurizers/single_state_featurizer.py index 1c0ce59677bf..ed0fe3702b6f 100644 --- a/rasa/core/featurizers/single_state_featurizer.py +++ b/rasa/core/featurizers/single_state_featurizer.py @@ -85,7 +85,9 @@ def _create_entity_tag_specs( ] def prepare_for_training( - self, domain: Domain, bilou_tagging: bool = False, + self, + domain: Domain, + bilou_tagging: bool = False, ) -> None: """Gets necessary information for featurization from domain. 
@@ -258,7 +260,9 @@ def encode_state( if state_type == PREVIOUS_ACTION: state_features.update( self._extract_state_features( - sub_state, precomputations=precomputations, sparse=True, + sub_state, + precomputations=precomputations, + sparse=True, ) ) # featurize user only if it is "real" user input, @@ -267,7 +271,9 @@ def encode_state( state_features.update( self._extract_state_features( - sub_state, precomputations=precomputations, sparse=True, + sub_state, + precomputations=precomputations, + sparse=True, ) ) if sub_state.get(ENTITIES): diff --git a/rasa/core/featurizers/tracker_featurizers.py b/rasa/core/featurizers/tracker_featurizers.py index 95a23e0ef044..51567619247f 100644 --- a/rasa/core/featurizers/tracker_featurizers.py +++ b/rasa/core/featurizers/tracker_featurizers.py @@ -155,7 +155,9 @@ def _create_entity_tags( return [ [ self.state_featurizer.encode_entities( - entity_data, precomputations, bilou_tagging, + entity_data, + precomputations, + bilou_tagging, ) for entity_data in trackers_entities ] @@ -256,7 +258,9 @@ def training_states_labels_and_entities( ) def prepare_for_featurization( - self, domain: Domain, bilou_tagging: bool = False, + self, + domain: Domain, + bilou_tagging: bool = False, ) -> None: """Ensures that the featurizer is ready to be called during training. diff --git a/rasa/core/http_interpreter.py b/rasa/core/http_interpreter.py index fed2bfaa8fec..57645fe4a2b0 100644 --- a/rasa/core/http_interpreter.py +++ b/rasa/core/http_interpreter.py @@ -22,7 +22,10 @@ def __init__(self, endpoint_config: Optional[EndpointConfig] = None) -> None: else: self.endpoint_config = EndpointConfig(constants.DEFAULT_SERVER_URL) - async def parse(self, message: UserMessage,) -> Dict[Text, Any]: + async def parse( + self, + message: UserMessage, + ) -> Dict[Text, Any]: """Parse a text message. Return a default value if the parsing of the text failed. 
diff --git a/rasa/core/migrate.py b/rasa/core/migrate.py index 180ab6f250ea..e26fc746615a 100644 --- a/rasa/core/migrate.py +++ b/rasa/core/migrate.py @@ -114,7 +114,9 @@ def _migrate_form_slots( def _migrate_auto_fill( - slot_name: Text, properties: Dict[Text, Any], entities: List[Text], + slot_name: Text, + properties: Dict[Text, Any], + entities: List[Text], ) -> Dict[Text, Any]: if slot_name in entities and properties.get("auto_fill", True) is True: from_entity_mapping = { @@ -179,7 +181,10 @@ def _assemble_new_domain( def _write_final_domain( - domain_file: Path, new_forms: Dict, new_slots: Dict, out_file: Path, + domain_file: Path, + new_forms: Dict, + new_slots: Dict, + out_file: Path, ) -> None: if domain_file.is_dir(): for file in domain_file.iterdir(): diff --git a/rasa/core/nlg/response.py b/rasa/core/nlg/response.py index 6c2da000fd23..edac953e1ab6 100644 --- a/rasa/core/nlg/response.py +++ b/rasa/core/nlg/response.py @@ -27,7 +27,9 @@ def __init__(self, responses: Dict[Text, List[Dict[Text, Any]]]) -> None: self.responses = responses def _matches_filled_slots( - self, filled_slots: Dict[Text, Any], response: Dict[Text, Any], + self, + filled_slots: Dict[Text, Any], + response: Dict[Text, Any], ) -> bool: """Checks if the conditional response variation matches the filled slots.""" constraints = response.get(RESPONSE_CONDITION) diff --git a/rasa/core/policies/ensemble.py b/rasa/core/policies/ensemble.py index 5a6327ecb5eb..1df06f918b95 100644 --- a/rasa/core/policies/ensemble.py +++ b/rasa/core/policies/ensemble.py @@ -100,7 +100,9 @@ def combine_predictions_from_kwargs( value for value in kwargs.values() if isinstance(value, PolicyPrediction) ] return self.combine_predictions( - predictions=predictions, tracker=tracker, domain=domain, + predictions=predictions, + tracker=tracker, + domain=domain, ) @abstractmethod diff --git a/rasa/core/policies/memoization.py b/rasa/core/policies/memoization.py index d90935319062..23f38a3a462f 100644 --- a/rasa/core/policies/memoization.py +++ b/rasa/core/policies/memoization.py @@ -91,7 +91,11 @@ def __init__( ) -> None: """Initialize the policy.""" super().__init__( - config, model_storage, resource, execution_context, featurizer, + config, + model_storage, + resource, + execution_context, + featurizer, ) self.lookup = lookup or {} @@ -458,7 +462,8 @@ def recall( def _get_max_applied_events_for_max_history( - tracker: DialogueStateTracker, max_history: Optional[int], + tracker: DialogueStateTracker, + max_history: Optional[int], ) -> Optional[int]: """Computes the number of events in the tracker that correspond to max_history. @@ -485,7 +490,8 @@ def _get_max_applied_events_for_max_history( def _trim_tracker_by_max_history( - tracker: DialogueStateTracker, max_history: Optional[int], + tracker: DialogueStateTracker, + max_history: Optional[int], ) -> DialogueStateTracker: """Removes events from the tracker until it has `max_history` actions. 
diff --git a/rasa/core/policies/policy.py b/rasa/core/policies/policy.py index 5a9400e1ef8c..37871ecddfe6 100644 --- a/rasa/core/policies/policy.py +++ b/rasa/core/policies/policy.py @@ -422,7 +422,11 @@ def load( ) return cls( - config, model_storage, resource, execution_context, featurizer=featurizer, + config, + model_storage, + resource, + execution_context, + featurizer=featurizer, ) def _default_predictions(self, domain: Domain) -> List[float]: diff --git a/rasa/core/policies/rule_policy.py b/rasa/core/policies/rule_policy.py index b7ec4b06b29c..bec4b0cc57dd 100644 --- a/rasa/core/policies/rule_policy.py +++ b/rasa/core/policies/rule_policy.py @@ -156,7 +156,12 @@ def __init__( config[POLICY_MAX_HISTORY] = None super().__init__( - config, model_storage, resource, execution_context, featurizer, lookup, + config, + model_storage, + resource, + execution_context, + featurizer, + lookup, ) self._fallback_action_name = config["core_fallback_action_name"] diff --git a/rasa/core/policies/ted_policy.py b/rasa/core/policies/ted_policy.py index f2083a3feb78..c87139c06f5e 100644 --- a/rasa/core/policies/ted_policy.py +++ b/rasa/core/policies/ted_policy.py @@ -360,7 +360,11 @@ def __init__( ) -> None: """Declares instance variables with default values.""" super().__init__( - config, model_storage, resource, execution_context, featurizer=featurizer, + config, + model_storage, + resource, + execution_context, + featurizer=featurizer, ) self.split_entities_config = rasa.utils.train_utils.init_split_entities( @@ -702,7 +706,9 @@ def train( ) model_data, label_ids = self._prepare_for_training( - training_trackers, domain, precomputations, + training_trackers, + domain, + precomputations, ) if model_data.is_empty(): @@ -938,10 +944,12 @@ def persist_model_utilities(self, model_path: Path) -> None: model_path / f"{model_filename}.meta.pkl", self.config ) rasa.utils.io.pickle_dump( - model_path / f"{model_filename}.data_example.pkl", self.data_example, + model_path / f"{model_filename}.data_example.pkl", + self.data_example, ) rasa.utils.io.pickle_dump( - model_path / f"{model_filename}.fake_features.pkl", self.fake_features, + model_path / f"{model_filename}.fake_features.pkl", + self.fake_features, ) rasa.utils.io.pickle_dump( model_path / f"{model_filename}.label_data.pkl", @@ -953,7 +961,8 @@ def persist_model_utilities(self, model_path: Path) -> None: else [] ) rasa.shared.utils.io.dump_obj_as_json_to_file( - model_path / f"{model_filename}.entity_tag_specs.json", entity_tag_specs, + model_path / f"{model_filename}.entity_tag_specs.json", + entity_tag_specs, ) @classmethod @@ -1020,14 +1029,23 @@ def load( try: with model_storage.read_from(resource) as model_path: return cls._load( - model_path, config, model_storage, resource, execution_context, + model_path, + config, + model_storage, + resource, + execution_context, ) except ValueError: logger.debug( f"Failed to load {cls.__class__.__name__} from model storage. Resource " f"'{resource.name}' doesn't exist." 
) - return cls(config, model_storage, resource, execution_context,) + return cls( + config, + model_storage, + resource, + execution_context, + ) @classmethod def _load( @@ -1633,7 +1651,8 @@ def _encode_real_features_per_attribute( # combined batch dimension and dialogue length x 1 x units attribute_features = tf.expand_dims( self._last_token( - attribute_features, combined_sentence_sequence_feature_lengths, + attribute_features, + combined_sentence_sequence_feature_lengths, ), axis=1, ) diff --git a/rasa/core/policies/unexpected_intent_policy.py b/rasa/core/policies/unexpected_intent_policy.py index 5a28d1334042..d0767432ec97 100644 --- a/rasa/core/policies/unexpected_intent_policy.py +++ b/rasa/core/policies/unexpected_intent_policy.py @@ -175,9 +175,15 @@ def get_default_config() -> Dict[Text, Any]: # the dialogue transformer encoder. ENCODING_DIMENSION: 50, # Number of units in transformer encoders - TRANSFORMER_SIZE: {TEXT: 128, DIALOGUE: 128,}, + TRANSFORMER_SIZE: { + TEXT: 128, + DIALOGUE: 128, + }, # Number of layers in transformer encoders - NUM_TRANSFORMER_LAYERS: {TEXT: 1, DIALOGUE: 1,}, + NUM_TRANSFORMER_LAYERS: { + TEXT: 1, + DIALOGUE: 1, + }, # Number of attention heads in transformer NUM_HEADS: 4, # If 'True' use key relative embeddings in attention @@ -373,7 +379,10 @@ def _assemble_label_data( label_data = RasaModelData() label_data.add_data(attribute_data, key_prefix=f"{LABEL_KEY}_") label_data.add_lengths( - f"{LABEL}_{INTENT}", SEQUENCE_LENGTH, f"{LABEL}_{INTENT}", SEQUENCE, + f"{LABEL}_{INTENT}", + SEQUENCE_LENGTH, + f"{LABEL}_{INTENT}", + SEQUENCE, ) label_ids = np.arange(len(domain.intents)) label_data.add_features( @@ -523,7 +532,9 @@ def _collect_action_metadata( query_intent_index = domain.intents.index(query_intent) def _compile_metadata_for_label( - label_name: Text, similarity_score: float, threshold: Optional[float], + label_name: Text, + similarity_score: float, + threshold: Optional[float], ) -> "RankingCandidateMetadata": severity = float(threshold - similarity_score) if threshold else None return { @@ -630,7 +641,10 @@ def predict_action_probabilities( ) @staticmethod - def _should_skip_prediction(tracker: DialogueStateTracker, domain: Domain,) -> bool: + def _should_skip_prediction( + tracker: DialogueStateTracker, + domain: Domain, + ) -> bool: """Checks if the policy should skip making a prediction. A prediction can be skipped if: @@ -755,7 +769,8 @@ def _check_unlikely_intent( @staticmethod def _collect_label_id_grouped_scores( - output_scores: Dict[Text, np.ndarray], label_ids: np.ndarray, + output_scores: Dict[Text, np.ndarray], + label_ids: np.ndarray, ) -> Dict[int, Dict[Text, List[float]]]: """Collects similarities predicted for each label id. @@ -925,7 +940,10 @@ class IntentTED(TED): """ def _prepare_dot_product_loss( - self, name: Text, scale_loss: bool, prefix: Text = "loss", + self, + name: Text, + scale_loss: bool, + prefix: Text = "loss", ) -> None: self._tf_layers[f"{prefix}.{name}"] = self.dot_product_loss_layer( self.config[NUM_NEG], diff --git a/rasa/core/processor.py b/rasa/core/processor.py index 26d2c0da7cff..45f58ecdbcda 100644 --- a/rasa/core/processor.py +++ b/rasa/core/processor.py @@ -155,7 +155,9 @@ async def handle_message( return None async def run_action_extract_slots( - self, output_channel: OutputChannel, tracker: DialogueStateTracker, + self, + output_channel: OutputChannel, + tracker: DialogueStateTracker, ) -> DialogueStateTracker: """Run action to extract slots and update the tracker accordingly. 
@@ -167,7 +169,9 @@ async def run_action_extract_slots( the given (updated) tracker """ action_extract_slots = rasa.core.actions.action.action_for_name_or_text( - ACTION_EXTRACT_SLOTS, self.domain, self.action_endpoint, + ACTION_EXTRACT_SLOTS, + self.domain, + self.action_endpoint, ) extraction_events = await action_extract_slots.run( output_channel, self.nlg, tracker, self.domain @@ -711,7 +715,9 @@ def _should_handle_message(tracker: DialogueStateTracker) -> bool: ) def is_action_limit_reached( - self, tracker: DialogueStateTracker, should_predict_another_action: bool, + self, + tracker: DialogueStateTracker, + should_predict_another_action: bool, ) -> bool: """Check whether the maximum number of predictions has been met. @@ -738,7 +744,9 @@ def is_action_limit_reached( ) async def _run_prediction_loop( - self, output_channel: OutputChannel, tracker: DialogueStateTracker, + self, + output_channel: OutputChannel, + tracker: DialogueStateTracker, ) -> None: # keep taking actions decided by the policy until it chooses to 'listen' should_predict_another_action = True @@ -988,7 +996,8 @@ def _predict_next_with_tracker( raise ValueError("Cannot predict next action if there is no core target.") results = self.graph_runner.run( - inputs={PLACEHOLDER_TRACKER: tracker}, targets=[target], + inputs={PLACEHOLDER_TRACKER: tracker}, + targets=[target], ) policy_prediction = results[target] return policy_prediction diff --git a/rasa/core/run.py b/rasa/core/run.py index 768a79cb9641..a9c53b88f5b6 100644 --- a/rasa/core/run.py +++ b/rasa/core/run.py @@ -96,7 +96,12 @@ def configure_app( ) -> Sanic: """Run the agent.""" rasa.core.utils.configure_file_logging( - logger, log_file, use_syslog, syslog_address, syslog_port, syslog_protocol, + logger, + log_file, + use_syslog, + syslog_address, + syslog_port, + syslog_protocol, ) if enable_api: @@ -215,7 +220,11 @@ def serve_application( ) rasa.utils.common.update_sanic_log_level( - log_file, use_syslog, syslog_address, syslog_port, syslog_protocol, + log_file, + use_syslog, + syslog_address, + syslog_port, + syslog_protocol, ) app.run( diff --git a/rasa/core/test.py b/rasa/core/test.py index d31128376911..e3f69dd94f46 100644 --- a/rasa/core/test.py +++ b/rasa/core/test.py @@ -843,7 +843,10 @@ async def _predict_tracker_actions( prediction, entity_result, ) = await _collect_action_executed_predictions( - processor, partial_tracker, event, fail_on_prediction_errors, + processor, + partial_tracker, + event, + fail_on_prediction_errors, ) if entity_result: policy_entity_results.append(entity_result) @@ -994,7 +997,9 @@ async def _collect_story_predictions( accuracy = 0 _log_evaluation_table( - [1] * len(completed_trackers), "CONVERSATION", accuracy, + [1] * len(completed_trackers), + "CONVERSATION", + accuracy, ) return ( diff --git a/rasa/core/tracker_store.py b/rasa/core/tracker_store.py index c2f927bc53c9..d610421f2d44 100644 --- a/rasa/core/tracker_store.py +++ b/rasa/core/tracker_store.py @@ -418,7 +418,9 @@ def get_or_create_table( except self.client.exceptions.ResourceNotFoundException: table = dynamo.create_table( TableName=self.table_name, - KeySchema=[{"AttributeName": "sender_id", "KeyType": "HASH"},], + KeySchema=[ + {"AttributeName": "sender_id", "KeyType": "HASH"}, + ], AttributeDefinitions=[ {"AttributeName": "sender_id", "AttributeType": "S"}, ], @@ -444,7 +446,9 @@ def serialise_tracker(self, tracker: "DialogueStateTracker") -> Dict: """Serializes the tracker, returns object with decimal types.""" d = tracker.as_dialogue().as_dict() d.update( - 
{"sender_id": tracker.sender_id,} + { + "sender_id": tracker.sender_id, + } ) # DynamoDB cannot store `float`s, so we'll convert them to `Decimal`s return core_utils.replace_floats_with_decimals(d) diff --git a/rasa/core/training/__init__.py b/rasa/core/training/__init__.py index 7b3d30b95925..30287ccce2b2 100644 --- a/rasa/core/training/__init__.py +++ b/rasa/core/training/__init__.py @@ -8,7 +8,9 @@ def extract_story_graph( - resource_name: Text, domain: "Domain", exclusion_percentage: Optional[int] = None, + resource_name: Text, + domain: "Domain", + exclusion_percentage: Optional[int] = None, ) -> "StoryGraph": """Loads training stories / rules from file or directory. @@ -25,7 +27,9 @@ def extract_story_graph( import rasa.shared.core.training_data.loading as core_loading story_steps = core_loading.load_data_from_resource( - resource_name, domain, exclusion_percentage=exclusion_percentage, + resource_name, + domain, + exclusion_percentage=exclusion_percentage, ) return StoryGraph(story_steps) diff --git a/rasa/core/training/interactive.py b/rasa/core/training/interactive.py index 2e86909986ed..701db010029a 100644 --- a/rasa/core/training/interactive.py +++ b/rasa/core/training/interactive.py @@ -666,7 +666,10 @@ async def _request_action_from_user( await _print_history(conversation_id, endpoint) choices = [ - {"name": f'{a["score"]:03.2f} {a["action"]:40}', "value": a["action"],} + { + "name": f'{a["score"]:03.2f} {a["action"]:40}', + "value": a["action"], + } for a in predictions ] @@ -1628,7 +1631,9 @@ async def visualisation_html(request: Request) -> HTTPResponse: # noinspection PyUnusedLocal @app.route("/visualization.dot", methods=["GET"]) - async def visualisation_png(request: Request,) -> HTTPResponse: + async def visualisation_png( + request: Request, + ) -> HTTPResponse: try: headers = {"Cache-Control": "no-cache"} return await response.file(os.path.abspath(image_path), headers=headers) diff --git a/rasa/core/training/story_conflict.py b/rasa/core/training/story_conflict.py index c6a2fb645807..cfe2f2185965 100644 --- a/rasa/core/training/story_conflict.py +++ b/rasa/core/training/story_conflict.py @@ -170,7 +170,10 @@ def find_story_conflicts( # Iterate once more over all states and note the (unhashed) state, # for which a conflict occurs conflicts = _build_conflicts_from_states( - trackers, domain, max_history, conflicting_state_action_mapping, + trackers, + domain, + max_history, + conflicting_state_action_mapping, ) return conflicts @@ -266,7 +269,8 @@ def _build_conflicts_from_states( conflicts[hashed_state] = StoryConflict(element.sliced_states) conflicts[hashed_state].add_conflicting_action( - action=str(element.event), story_name=element.tracker.sender_id, + action=str(element.event), + story_name=element.tracker.sender_id, ) # Return list of conflicts that arise from unpredictable actions diff --git a/rasa/core/utils.py b/rasa/core/utils.py index ef626b7d4461..54532b24232d 100644 --- a/rasa/core/utils.py +++ b/rasa/core/utils.py @@ -54,7 +54,8 @@ def configure_file_logging( ) socktype = SOCK_STREAM if syslog_protocol == TCP_PROTOCOL else SOCK_DGRAM syslog_handler = logging.handlers.SysLogHandler( - address=(syslog_address, syslog_port), socktype=socktype, + address=(syslog_address, syslog_port), + socktype=socktype, ) syslog_handler.setLevel(logger_obj.level) syslog_handler.setFormatter(formatter) diff --git a/rasa/engine/graph.py b/rasa/engine/graph.py index 18747846f33c..1760e12b69c6 100644 --- a/rasa/engine/graph.py +++ b/rasa/engine/graph.py @@ -361,7 +361,8 @@ def 
__init__( self._component_class, self._constructor_name ) self._component_config: Dict[Text, Any] = rasa.utils.common.override_defaults( - self._component_class.get_default_config(), component_config, + self._component_class.get_default_config(), + component_config, ) self._fn_name: Text = fn_name self._fn: Callable = getattr(self._component_class, self._fn_name) diff --git a/rasa/engine/recipes/default_recipe.py b/rasa/engine/recipes/default_recipe.py index d00cf0b2439d..7e22372fb1d4 100644 --- a/rasa/engine/recipes/default_recipe.py +++ b/rasa/engine/recipes/default_recipe.py @@ -50,7 +50,11 @@ logger = logging.getLogger(__name__) -DEFAULT_PREDICT_KWARGS = dict(constructor_name="load", eager=True, is_target=False,) +DEFAULT_PREDICT_KWARGS = dict( + constructor_name="load", + eager=True, + is_target=False, +) class DefaultV1RecipeRegisterException(RasaException): @@ -329,7 +333,10 @@ def _add_nlu_train_node( cli_parameters: Dict[Text, Any], ) -> Text: config_from_cli = self._extra_config_from_cli(cli_parameters, component, config) - model_provider_needs = self._get_model_provider_needs(train_nodes, component,) + model_provider_needs = self._get_model_provider_needs( + train_nodes, + component, + ) train_node_name = f"train_{component_name}" train_nodes[train_node_name] = SchemaNode( @@ -394,7 +401,8 @@ def _add_nlu_process_node( resource_needs = {"resource": from_resource} model_provider_needs = self._get_model_provider_needs( - train_nodes, component_class, + train_nodes, + component_class, ) node_name = f"run_{component_name}" @@ -412,7 +420,9 @@ def _add_nlu_process_node( return node_name def _get_model_provider_needs( - self, nodes: Dict[Text, SchemaNode], component_class: Type[GraphComponent], + self, + nodes: Dict[Text, SchemaNode], + component_class: Type[GraphComponent], ) -> Dict[Text, Text]: model_provider_needs = {} component = self._from_registry(component_class.__name__) @@ -517,7 +527,9 @@ def _add_core_train_nodes( self._add_end_to_end_features_for_training(preprocessors, train_nodes) def _add_end_to_end_features_for_training( - self, preprocessors: List[Text], train_nodes: Dict[Text, SchemaNode], + self, + preprocessors: List[Text], + train_nodes: Dict[Text, SchemaNode], ) -> None: train_nodes["story_to_nlu_training_data_converter"] = SchemaNode( needs={ @@ -542,7 +554,9 @@ def _add_end_to_end_features_for_training( node_with_e2e_features = "end_to_end_features_provider" train_nodes[node_with_e2e_features] = SchemaNode( - needs={"messages": last_node_name,}, + needs={ + "messages": last_node_name, + }, uses=CoreFeaturizationCollector, constructor_name="create", fn="collect", @@ -591,7 +605,10 @@ def _create_predict_nodes( if self._use_core: self._add_core_predict_nodes( - predict_config, predict_nodes, train_nodes, preprocessors, + predict_config, + predict_nodes, + train_nodes, + preprocessors, ) return predict_nodes @@ -693,7 +710,10 @@ def _add_nlu_predict_node( ) -> Text: node_name = f"run_{component_name}" - model_provider_needs = self._get_model_provider_needs(predict_nodes, node.uses,) + model_provider_needs = self._get_model_provider_needs( + predict_nodes, + node.uses, + ) predict_nodes[node_name] = dataclasses.replace( node, @@ -785,7 +805,9 @@ def _add_core_predict_nodes( ) def _add_end_to_end_features_for_inference( - self, predict_nodes: Dict[Text, SchemaNode], preprocessors: List[Text], + self, + predict_nodes: Dict[Text, SchemaNode], + preprocessors: List[Text], ) -> Text: predict_nodes["tracker_to_message_converter"] = SchemaNode( **DEFAULT_PREDICT_KWARGS, 
@@ -808,7 +830,9 @@ def _add_end_to_end_features_for_inference( node_with_e2e_features = "end_to_end_features_provider" predict_nodes[node_with_e2e_features] = SchemaNode( **DEFAULT_PREDICT_KWARGS, - needs={"messages": last_node_name,}, + needs={ + "messages": last_node_name, + }, uses=CoreFeaturizationCollector, fn="collect", config={}, diff --git a/rasa/engine/runner/dask.py b/rasa/engine/runner/dask.py index f51e15992eae..48fc9d1b083e 100644 --- a/rasa/engine/runner/dask.py +++ b/rasa/engine/runner/dask.py @@ -109,7 +109,10 @@ def run( raise GraphRunError("Error running runner.") from e @staticmethod - def _add_inputs_to_graph(inputs: Optional[Dict[Text, Any]], graph: Any,) -> None: + def _add_inputs_to_graph( + inputs: Optional[Dict[Text, Any]], + graph: Any, + ) -> None: for input_name, input_value in inputs.items(): if isinstance(input_value, str) and input_value in graph.keys(): raise GraphRunError( diff --git a/rasa/engine/storage/local_model_storage.py b/rasa/engine/storage/local_model_storage.py index 54a765cf7714..72beaeeafec7 100644 --- a/rasa/engine/storage/local_model_storage.py +++ b/rasa/engine/storage/local_model_storage.py @@ -85,14 +85,17 @@ def metadata_from_archive( @staticmethod def _extract_archive_to_directory( - model_archive_path: Union[Text, Path], temporary_directory: Path, + model_archive_path: Union[Text, Path], + temporary_directory: Path, ) -> None: with TarSafe.open(model_archive_path, mode="r:gz") as tar: tar.extractall(temporary_directory) LocalModelStorage._assert_not_rasa2_archive(temporary_directory) @staticmethod - def _assert_not_rasa2_archive(temporary_directory: Path,) -> None: + def _assert_not_rasa2_archive( + temporary_directory: Path, + ) -> None: fingerprint_file = Path(temporary_directory) / "fingerprint.json" if fingerprint_file.is_file(): serialized_fingerprint = rasa.shared.utils.io.read_json_file( @@ -108,7 +111,8 @@ def _initialize_model_storage_from_model_archive( ) -> None: for path in (temporary_directory / MODEL_ARCHIVE_COMPONENTS_DIR).glob("*"): shutil.move( - str(path), str(storage_path), + str(path), + str(storage_path), ) @staticmethod @@ -183,7 +187,10 @@ def create_model_package( return model_metadata @staticmethod - def _persist_metadata(metadata: ModelMetadata, temporary_directory: Path,) -> None: + def _persist_metadata( + metadata: ModelMetadata, + temporary_directory: Path, + ) -> None: rasa.shared.utils.io.dump_obj_as_json_to_file( temporary_directory / MODEL_ARCHIVE_METADATA_FILE, metadata.as_dict() diff --git a/rasa/engine/storage/storage.py b/rasa/engine/storage/storage.py index 7f3fd970882a..f1ef0b09860e 100644 --- a/rasa/engine/storage/storage.py +++ b/rasa/engine/storage/storage.py @@ -139,7 +139,9 @@ class ModelMetadata: language: Optional[Text] training_type: TrainingType = TrainingType.BOTH - def __post_init__(self,) -> None: + def __post_init__( + self, + ) -> None: """Raises an exception when the meta data indicates an unsupported version. Raises: diff --git a/rasa/engine/training/components.py b/rasa/engine/training/components.py index b6064ed9dffe..fe7b7506f4ce 100644 --- a/rasa/engine/training/components.py +++ b/rasa/engine/training/components.py @@ -19,7 +19,8 @@ class PrecomputedValueProvider(GraphComponent): """ def __init__( - self, output: Cacheable, + self, + output: Cacheable, ): """Initializes a `PrecomputedValueProvider`. 
@@ -37,7 +38,9 @@ def create( execution_context: ExecutionContext, ) -> PrecomputedValueProvider: """Creates instance (see parent class for full docstring).""" - return cls(output=config["output"],) + return cls( + output=config["output"], + ) def get_value(self) -> Cacheable: """Returns the precomputed output.""" diff --git a/rasa/engine/training/graph_trainer.py b/rasa/engine/training/graph_trainer.py index 2875bc04aef5..153f9f111321 100644 --- a/rasa/engine/training/graph_trainer.py +++ b/rasa/engine/training/graph_trainer.py @@ -108,7 +108,9 @@ def train( graph_runner.run(inputs={PLACEHOLDER_IMPORTER: importer}) return self._model_storage.create_model_package( - output_filename, model_configuration, domain, + output_filename, + model_configuration, + domain, ) def fingerprint( diff --git a/rasa/engine/validation.py b/rasa/engine/validation.py index 2a8185eeee69..b490dd861f82 100644 --- a/rasa/engine/validation.py +++ b/rasa/engine/validation.py @@ -102,7 +102,10 @@ def _validate( _validate_constructor(node, create_fn_params) _validate_needs( - node, schema, create_fn_params, run_fn_params, + node, + schema, + create_fn_params, + run_fn_params, ) _validate_required_components(schema) @@ -125,7 +128,10 @@ def _validate_prediction_targets( def _validate_target( - target_name: Text, target_type: Text, expected_type: Type, schema: GraphSchema, + target_name: Text, + target_type: Text, + expected_type: Type, + schema: GraphSchema, ) -> None: if target_name not in schema.nodes: raise GraphSchemaValidationException( @@ -195,7 +201,10 @@ def _validate_interface_usage(node: SchemaNode) -> None: ) -def _validate_supported_languages(language: Optional[Text], node: SchemaNode,) -> None: +def _validate_supported_languages( + language: Optional[Text], + node: SchemaNode, +) -> None: supported_languages = node.uses.supported_languages() not_supported_languages = node.uses.not_supported_languages() @@ -378,7 +387,8 @@ def _validate_types_of_reserved_keywords( def _validate_constructor( - node: SchemaNode, create_fn_params: Dict[Text, ParameterInfo], + node: SchemaNode, + create_fn_params: Dict[Text, ParameterInfo], ) -> None: _validate_types_of_reserved_keywords(create_fn_params, node, node.constructor_name) @@ -497,11 +507,14 @@ def _validate_parent_return_type( ) -def _validate_required_components(schema: GraphSchema,) -> None: +def _validate_required_components( + schema: GraphSchema, +) -> None: unmet_requirements: Dict[Type, Set[Text]] = dict() for target_name in schema.target_names: unmet_requirements_for_target, _ = _recursively_check_required_components( - node_name=target_name, schema=schema, + node_name=target_name, + schema=schema, ) for component_type, node_names in unmet_requirements_for_target.items(): unmet_requirements.setdefault(component_type, set()).update(node_names) @@ -529,7 +542,8 @@ def _validate_required_components(schema: GraphSchema,) -> None: def _recursively_check_required_components( - node_name: Text, schema: GraphSchema, + node_name: Text, + schema: GraphSchema, ) -> Tuple[Dict[Type, Set[Text]], Set[Type]]: """Collects unmet requirements and types used in the subtree rooted at `node_name`. 
diff --git a/rasa/graph_components/providers/nlu_training_data_provider.py b/rasa/graph_components/providers/nlu_training_data_provider.py index 188953f36564..2494644f0c5c 100644 --- a/rasa/graph_components/providers/nlu_training_data_provider.py +++ b/rasa/graph_components/providers/nlu_training_data_provider.py @@ -14,7 +14,10 @@ class NLUTrainingDataProvider(GraphComponent): """Provides NLU training data during training.""" def __init__( - self, config: Dict[Text, Any], model_storage: ModelStorage, resource: Resource, + self, + config: Dict[Text, Any], + model_storage: ModelStorage, + resource: Resource, ) -> None: """Creates a new NLU training data provider.""" self._config = config @@ -45,7 +48,10 @@ def _persist(self, training_data: TrainingData) -> None: filename=DEFAULT_TRAINING_DATA_OUTPUT_PATH, ) - def provide(self, importer: TrainingDataImporter,) -> TrainingData: + def provide( + self, + importer: TrainingDataImporter, + ) -> TrainingData: """Provides nlu training data during training.""" if "language" in self._config: training_data = importer.get_nlu_data(language=self._config["language"]) diff --git a/rasa/graph_components/providers/story_graph_provider.py b/rasa/graph_components/providers/story_graph_provider.py index a915f2507967..cb59dabf41d7 100644 --- a/rasa/graph_components/providers/story_graph_provider.py +++ b/rasa/graph_components/providers/story_graph_provider.py @@ -33,7 +33,10 @@ def create( """Creates component (see parent class for full docstring).""" return cls(config) - def provide(self, importer: TrainingDataImporter,) -> StoryGraph: + def provide( + self, + importer: TrainingDataImporter, + ) -> StoryGraph: """Provides the story graph from the training data. Args: diff --git a/rasa/graph_components/validators/default_recipe_validator.py b/rasa/graph_components/validators/default_recipe_validator.py index ef175711889b..443dee73e6ef 100644 --- a/rasa/graph_components/validators/default_recipe_validator.py +++ b/rasa/graph_components/validators/default_recipe_validator.py @@ -354,7 +354,9 @@ def _warn_of_competition_with_regex_extractor( docs=f"{DOCS_URL_COMPONENTS}#regexentityextractor", ) - def _raise_if_featurizers_are_not_compatible(self,) -> None: + def _raise_if_featurizers_are_not_compatible( + self, + ) -> None: """Raises or warns if there are problems regarding the featurizers. Raises: @@ -408,7 +410,8 @@ def _warn_if_no_rule_policy_is_contained(self) -> None: ) def _raise_if_domain_contains_form_names_but_no_rule_policy_given( - self, domain: Domain, + self, + domain: Domain, ) -> None: """Validates that there exists a rule policy if forms are defined. @@ -430,7 +433,8 @@ def _raise_if_domain_contains_form_names_but_no_rule_policy_given( ) def _raise_if_a_rule_policy_is_incompatible_with_domain( - self, domain: Domain, + self, + domain: Domain, ) -> None: """Validates the rule policies against the domain. diff --git a/rasa/graph_components/validators/finetuning_validator.py b/rasa/graph_components/validators/finetuning_validator.py index ee479145460b..7623d195f3bc 100644 --- a/rasa/graph_components/validators/finetuning_validator.py +++ b/rasa/graph_components/validators/finetuning_validator.py @@ -83,7 +83,10 @@ def __init__( self._core = config["validate_core"] self._nlu = config["validate_nlu"] - def validate(self, importer: TrainingDataImporter,) -> TrainingDataImporter: + def validate( + self, + importer: TrainingDataImporter, + ) -> TrainingDataImporter: """Validates whether we can finetune Core and NLU when finetuning is enabled. 
Args: @@ -187,7 +190,10 @@ def _validate(self, importer: TrainingDataImporter) -> None: self.persist() def _compare_or_memorize( - self, fingerprint_key: Text, new_fingerprint: Text, error_message: Text, + self, + fingerprint_key: Text, + new_fingerprint: Text, + error_message: Text, ) -> None: """Compares given fingerprint if we are finetuning, otherwise just saves it. @@ -222,7 +228,9 @@ def _get_fingerprint_of_domain_pruned_for_core(domain: Domain) -> Text: pruned_domain = DomainForCoreTrainingProvider.create_pruned_version(domain) return pruned_domain.fingerprint() - def _get_fingerprint_of_schema_without_irrelevant_keys(self,) -> Text: + def _get_fingerprint_of_schema_without_irrelevant_keys( + self, + ) -> Text: """Returns a fingerprint of the given schema with certain items removed. These items include specifications that do not influence actual training diff --git a/rasa/model_training.py b/rasa/model_training.py index 71650fe7d0be..970219017593 100644 --- a/rasa/model_training.py +++ b/rasa/model_training.py @@ -165,7 +165,8 @@ def train( training_type = TrainingType.CORE with telemetry.track_model_training( - file_importer, model_type="rasa", + file_importer, + model_type="rasa", ): return _train_graph( file_importer, @@ -232,7 +233,8 @@ def _train_graph( full_model_path = Path(output_path, model_name) with telemetry.track_model_training( - file_importer, model_type=training_type.model_type, + file_importer, + model_type=training_type.model_type, ): trainer.train( model_configuration, diff --git a/rasa/nlu/classifiers/diet_classifier.py b/rasa/nlu/classifiers/diet_classifier.py index bf5016ecf2ca..42f2ccabe04d 100644 --- a/rasa/nlu/classifiers/diet_classifier.py +++ b/rasa/nlu/classifiers/diet_classifier.py @@ -968,7 +968,10 @@ def _predict_label( ranking = [(idx, casted_message_sim[idx]) for idx in ranked_label_indices] label_ranking = [ - {"name": self.index_label_id_mapping[label_idx], "confidence": score,} + { + "name": self.index_label_id_mapping[label_idx], + "confidence": score, + } for label_idx, score in ranking ] @@ -1726,7 +1729,8 @@ def batch_predict( tf_batch_data, TEXT ) sentence_feature_lengths = self._get_sentence_feature_lengths( - tf_batch_data, TEXT, + tf_batch_data, + TEXT, ) text_transformed, _, _, _, _, attention_weights = self._tf_layers[ diff --git a/rasa/nlu/classifiers/fallback_classifier.py b/rasa/nlu/classifiers/fallback_classifier.py index 4e1e56efd43a..a0ccdae7bbb9 100644 --- a/rasa/nlu/classifiers/fallback_classifier.py +++ b/rasa/nlu/classifiers/fallback_classifier.py @@ -52,7 +52,10 @@ def get_default_config() -> Dict[Text, Any]: AMBIGUITY_THRESHOLD_KEY: DEFAULT_NLU_FALLBACK_AMBIGUITY_THRESHOLD, } - def __init__(self, config: Dict[Text, Any],) -> None: + def __init__( + self, + config: Dict[Text, Any], + ) -> None: """Constructs a new fallback classifier.""" self.component_config = config diff --git a/rasa/nlu/classifiers/keyword_intent_classifier.py b/rasa/nlu/classifiers/keyword_intent_classifier.py index 70506b037881..a7a895c5b9ae 100644 --- a/rasa/nlu/classifiers/keyword_intent_classifier.py +++ b/rasa/nlu/classifiers/keyword_intent_classifier.py @@ -60,7 +60,10 @@ def create( """Creates a new untrained component (see parent class for full docstring).""" return cls(config, model_storage, resource, execution_context) - def train(self, training_data: TrainingData,) -> Resource: + def train( + self, + training_data: TrainingData, + ) -> Resource: """Trains the intent classifier on a data set.""" duplicate_examples = set() for ex in 
training_data.intent_examples: @@ -176,5 +179,9 @@ def load( intent_keyword_map = None return cls( - config, model_storage, resource, execution_context, intent_keyword_map, + config, + model_storage, + resource, + execution_context, + intent_keyword_map, ) diff --git a/rasa/nlu/classifiers/sklearn_intent_classifier.py b/rasa/nlu/classifiers/sklearn_intent_classifier.py index e0a1516a5f06..746cca6e2c8f 100644 --- a/rasa/nlu/classifiers/sklearn_intent_classifier.py +++ b/rasa/nlu/classifiers/sklearn_intent_classifier.py @@ -285,7 +285,13 @@ def load( encoder = LabelEncoder() encoder.classes_ = classes - return cls(config, model_storage, resource, classifier, encoder,) + return cls( + config, + model_storage, + resource, + classifier, + encoder, + ) except ValueError: logger.debug( f"Failed to load '{cls.__name__}' from model storage. Resource " diff --git a/rasa/nlu/extractors/entity_synonyms.py b/rasa/nlu/extractors/entity_synonyms.py index 3f6ffb676720..3f0ba26018df 100644 --- a/rasa/nlu/extractors/entity_synonyms.py +++ b/rasa/nlu/extractors/entity_synonyms.py @@ -62,7 +62,10 @@ def create( """Creates component (see parent class for full docstring).""" return cls(config, model_storage, resource, synonyms) - def train(self, training_data: TrainingData,) -> Resource: + def train( + self, + training_data: TrainingData, + ) -> Resource: """Trains the synonym lookup table.""" for key, value in list(training_data.entity_synonyms.items()): self._add_entities_if_synonyms(key, value) diff --git a/rasa/nlu/extractors/mitie_entity_extractor.py b/rasa/nlu/extractors/mitie_entity_extractor.py index 3d855e8561b5..1433b87a5f0d 100644 --- a/rasa/nlu/extractors/mitie_entity_extractor.py +++ b/rasa/nlu/extractors/mitie_entity_extractor.py @@ -191,7 +191,11 @@ def _prepare_mitie_sample(training_example: Message) -> Any: continue return sample - def process(self, messages: List[Message], model: MitieModel,) -> List[Message]: + def process( + self, + messages: List[Message], + model: MitieModel, + ) -> List[Message]: """Extracts entities from messages and appends them to the attribute. If no patterns where found during training, then the given messages will not @@ -217,7 +221,9 @@ def process(self, messages: List[Message], model: MitieModel,) -> List[Message]: return messages def _extract_entities( - self, message: Message, mitie_model: MitieModel, + self, + message: Message, + mitie_model: MitieModel, ) -> List[Dict[Text, Any]]: """Extract entities of the given type from the given user message. @@ -234,7 +240,8 @@ def _extract_entities( entities = [] token_texts = [token.text for token in tokens] mitie_entities = self._ner.extract_entities( - token_texts, mitie_model.word_feature_extractor, + token_texts, + mitie_model.word_feature_extractor, ) for e in mitie_entities: if len(e[0]): diff --git a/rasa/nlu/extractors/regex_entity_extractor.py b/rasa/nlu/extractors/regex_entity_extractor.py index 228186ccc46c..984865067792 100644 --- a/rasa/nlu/extractors/regex_entity_extractor.py +++ b/rasa/nlu/extractors/regex_entity_extractor.py @@ -210,7 +210,11 @@ def load( f"could not be extracted from the given training data - and hence " f"could not be persisted." 
) - return cls(config, model_storage=model_storage, resource=resource,) + return cls( + config, + model_storage=model_storage, + resource=resource, + ) def persist(self) -> None: """Persist this model.""" diff --git a/rasa/nlu/featurizers/dense_featurizer/convert_featurizer.py b/rasa/nlu/featurizers/dense_featurizer/convert_featurizer.py index f688d7556da7..f902cb0336a3 100644 --- a/rasa/nlu/featurizers/dense_featurizer/convert_featurizer.py +++ b/rasa/nlu/featurizers/dense_featurizer/convert_featurizer.py @@ -337,7 +337,10 @@ def _sequence_encoding_of_text(self, batch: List[Text]) -> np.ndarray: "sequence_encoding" ].numpy() - def process_training_data(self, training_data: TrainingData,) -> TrainingData: + def process_training_data( + self, + training_data: TrainingData, + ) -> TrainingData: """Featurize all message attributes in the training data with the ConveRT model. Args: diff --git a/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py b/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py index f795a6e4001e..3e88e54432e5 100644 --- a/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py +++ b/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py @@ -57,7 +57,9 @@ def required_components(cls) -> List[Type]: return [Tokenizer] def __init__( - self, config: Dict[Text, Any], execution_context: ExecutionContext, + self, + config: Dict[Text, Any], + execution_context: ExecutionContext, ) -> None: """Initializes the featurizer with the model in the config.""" super(LanguageModelFeaturizer, self).__init__( @@ -701,7 +703,10 @@ def _get_docs_for_batch( return batch_docs - def process_training_data(self, training_data: TrainingData,) -> TrainingData: + def process_training_data( + self, + training_data: TrainingData, + ) -> TrainingData: """Computes tokens and dense features for each message in training data. 
Args: diff --git a/rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py b/rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py index 40455dd0c2f3..ded6df421fbb 100644 --- a/rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py +++ b/rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py @@ -57,7 +57,9 @@ def required_packages() -> List[Text]: return ["mitie", "numpy"] def __init__( - self, config: Dict[Text, Any], execution_context: ExecutionContext, + self, + config: Dict[Text, Any], + execution_context: ExecutionContext, ) -> None: """Instantiates a new `MitieFeaturizer` instance.""" super().__init__(execution_context.node_name, config) diff --git a/rasa/nlu/featurizers/dense_featurizer/spacy_featurizer.py b/rasa/nlu/featurizers/dense_featurizer/spacy_featurizer.py index fdd0d1c03be5..3d56076dbcdd 100644 --- a/rasa/nlu/featurizers/dense_featurizer/spacy_featurizer.py +++ b/rasa/nlu/featurizers/dense_featurizer/spacy_featurizer.py @@ -47,7 +47,11 @@ def get_default_config() -> Dict[Text, Any]: POOLING: MEAN_POOLING, } - def __init__(self, config: Dict[Text, Any], name: Text,) -> None: + def __init__( + self, + config: Dict[Text, Any], + name: Text, + ) -> None: """Initializes SpacyFeaturizer.""" super().__init__(name, config) self.pooling_operation = self._config[POOLING] diff --git a/rasa/nlu/featurizers/featurizer.py b/rasa/nlu/featurizers/featurizer.py index 2325f65686b9..06d5e522980e 100644 --- a/rasa/nlu/featurizers/featurizer.py +++ b/rasa/nlu/featurizers/featurizer.py @@ -59,7 +59,12 @@ def add_features_to_message( (FEATURE_TYPE_SENTENCE, sentence), ]: if features is not None: - wrapped_feature = Features(features, type, attribute, self._identifier,) + wrapped_feature = Features( + features, + type, + attribute, + self._identifier, + ) message.add_features(wrapped_feature) @staticmethod diff --git a/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py b/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py index b342c04b0029..da10ded97b06 100644 --- a/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py +++ b/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py @@ -605,7 +605,9 @@ def _get_featurized_attribute( return [], [] def train( - self, training_data: TrainingData, model: Optional[SpacyModel] = None, + self, + training_data: TrainingData, + model: Optional[SpacyModel] = None, ) -> Resource: """Trains the featurizer. diff --git a/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py b/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py index 2c3d5e91f107..7dc4adf20d4a 100644 --- a/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py +++ b/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py @@ -102,7 +102,11 @@ class LexicalSyntacticFeaturizer(SparseFeaturizer, GraphComponent): @classmethod def _extract_raw_features_from_token( - cls, feature_name: Text, token: Token, token_position: int, num_tokens: int, + cls, + feature_name: Text, + token: Token, + token_position: int, + num_tokens: int, ) -> Text: """Extracts a raw feature from the token at the given position. 
diff --git a/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py b/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py
index 1219d98227d5..b2bb0754a42d 100644
--- a/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py
+++ b/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py
@@ -116,7 +116,10 @@ def _merge_new_patterns(self, new_patterns: List[Dict[Text, Text]]) -> None:
             else:
                 self.known_patterns.append(extra_pattern)
 
-    def train(self, training_data: TrainingData,) -> Resource:
+    def train(
+        self,
+        training_data: TrainingData,
+    ) -> Resource:
         """Trains the component with all patterns extracted from training data."""
         patterns_from_data = pattern_utils.extract_patterns(
             training_data,
diff --git a/rasa/nlu/selectors/response_selector.py b/rasa/nlu/selectors/response_selector.py
index c5c925ef99fa..0137468a09e3 100644
--- a/rasa/nlu/selectors/response_selector.py
+++ b/rasa/nlu/selectors/response_selector.py
@@ -831,7 +831,8 @@ def _create_all_labels(self) -> Tuple[tf.Tensor, tf.Tensor]:
         # effective sequence length of these features being 1 or 0.
         # We need to combine the two lengths to correctly get the last position.
         sentence_feature_lengths = self._get_sentence_feature_lengths(
-            self.tf_label_data, LABEL,
+            self.tf_label_data,
+            LABEL,
         )
         sentence_label = self._last_token(
             label_transformed, sequence_feature_lengths + sentence_feature_lengths
diff --git a/rasa/nlu/test.py b/rasa/nlu/test.py
index b5a4c750bce4..616a35462103 100644
--- a/rasa/nlu/test.py
+++ b/rasa/nlu/test.py
@@ -593,7 +593,10 @@ def evaluate_intents(
     )
 
     report, precision, f1, accuracy, confusion_matrix, labels = _calculate_report(
-        output_directory, target_intents, predicted_intents, report_as_dict,
+        output_directory,
+        target_intents,
+        predicted_intents,
+        report_as_dict,
     )
     if output_directory:
         _dump_report(output_directory, "intent_report.json", report)
@@ -670,7 +673,10 @@ def _calculate_report(
         report_as_dict = bool(output_directory)
 
     report, precision, f1, accuracy = get_evaluation_metrics(
-        targets, predictions, output_dict=report_as_dict, exclude_label=exclude_label,
+        targets,
+        predictions,
+        output_dict=report_as_dict,
+        exclude_label=exclude_label,
     )
 
     if report_as_dict:
@@ -1382,7 +1388,8 @@ async def run_evaluation(
     from rasa.shared.constants import DEFAULT_DOMAIN_PATH
 
     test_data_importer = TrainingDataImporter.load_from_dict(
-        training_data_paths=[data_path], domain_path=DEFAULT_DOMAIN_PATH,
+        training_data_paths=[data_path],
+        domain_path=DEFAULT_DOMAIN_PATH,
     )
     test_data = test_data_importer.get_nlu_data()
diff --git a/rasa/nlu/tokenizers/jieba_tokenizer.py b/rasa/nlu/tokenizers/jieba_tokenizer.py
index 8bf2a144069c..c936f052b344 100644
--- a/rasa/nlu/tokenizers/jieba_tokenizer.py
+++ b/rasa/nlu/tokenizers/jieba_tokenizer.py
@@ -44,7 +44,10 @@ def get_default_config() -> Dict[Text, Any]:
         }
 
     def __init__(
-        self, config: Dict[Text, Any], model_storage: ModelStorage, resource: Resource,
+        self,
+        config: Dict[Text, Any],
+        model_storage: ModelStorage,
+        resource: Resource,
     ) -> None:
         """Initialize the tokenizer."""
         super().__init__(config)
diff --git a/rasa/server.py b/rasa/server.py
index ba0b00514b9b..7d08f6822a9d 100644
--- a/rasa/server.py
+++ b/rasa/server.py
@@ -79,7 +79,8 @@
     response.HTTPResponse, Coroutine[Any, Any, response.HTTPResponse]
 ]
 SanicView = Callable[
-    [Arg(Request, "request"), VarArg(), KwArg()], SanicResponse,  # noqa: F821
+    [Arg(Request, "request"), VarArg(), KwArg()],
+    SanicResponse,  # noqa: F821
 ]
 
diff --git a/rasa/shared/core/domain.py b/rasa/shared/core/domain.py
index e49d4d531c0b..4c0e8301eb2a 100644
--- a/rasa/shared/core/domain.py
+++ b/rasa/shared/core/domain.py
@@ -233,7 +233,8 @@ def _get_session_config(session_config: Dict) -> SessionConfig:
         session_expiration_time_min = DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES
 
     carry_over_slots = session_config.get(
-        CARRY_OVER_SLOTS_KEY, DEFAULT_CARRY_OVER_SLOTS_TO_NEW_SESSION,
+        CARRY_OVER_SLOTS_KEY,
+        DEFAULT_CARRY_OVER_SLOTS_TO_NEW_SESSION,
    )
 
     return SessionConfig(session_expiration_time_min, carry_over_slots)
@@ -1012,7 +1013,8 @@ def _get_user_sub_state(
 
     @staticmethod
     def _get_slots_sub_state(
-        tracker: "DialogueStateTracker", omit_unset_slots: bool = False,
+        tracker: "DialogueStateTracker",
+        omit_unset_slots: bool = False,
     ) -> Dict[Text, Union[Text, Tuple[float]]]:
         """Sets all set slots with the featurization of the stored value.
 
@@ -1085,7 +1087,9 @@ def _clean_state(state: State) -> State:
         }
 
     def get_active_state(
-        self, tracker: "DialogueStateTracker", omit_unset_slots: bool = False,
+        self,
+        tracker: "DialogueStateTracker",
+        omit_unset_slots: bool = False,
     ) -> State:
         """Given a dialogue tracker, makes a representation of current dialogue state.
 
@@ -1112,7 +1116,8 @@ def get_active_state(
 
     @staticmethod
     def _remove_rule_only_features(
-        state: State, rule_only_data: Optional[Dict[Text, Any]],
+        state: State,
+        rule_only_data: Optional[Dict[Text, Any]],
     ) -> None:
         if not rule_only_data:
             return
diff --git a/rasa/shared/core/events.py b/rasa/shared/core/events.py
index bd4fad0dbc0c..bf9e85b81e36 100644
--- a/rasa/shared/core/events.py
+++ b/rasa/shared/core/events.py
@@ -82,7 +82,11 @@
 )
 
 IntentPrediction = TypedDict(
-    "IntentPrediction", {INTENT_NAME_KEY: Text, PREDICTED_CONFIDENCE_KEY: float,},
+    "IntentPrediction",
+    {
+        INTENT_NAME_KEY: Text,
+        PREDICTED_CONFIDENCE_KEY: float,
+    },
 )
 NLUPredictionData = TypedDict(
     "NLUPredictionData",
@@ -553,7 +557,9 @@ def as_dict(self) -> Dict[Text, Any]:
             )
         return _dict
 
-    def as_sub_state(self,) -> Dict[Text, Union[None, Text, List[Optional[Text]]]]:
+    def as_sub_state(
+        self,
+    ) -> Dict[Text, Union[None, Text, List[Optional[Text]]]]:
         """Turns a UserUttered event into features.
 
         The substate contains information about entities, intent and text of the
diff --git a/rasa/shared/core/slot_mappings.py b/rasa/shared/core/slot_mappings.py
index 2c98563ee057..3fc711c04222 100644
--- a/rasa/shared/core/slot_mappings.py
+++ b/rasa/shared/core/slot_mappings.py
@@ -74,7 +74,9 @@ def validate(mapping: Dict[Text, Any], slot_name: Text) -> None:
 
     @staticmethod
     def _get_active_loop_ignored_intents(
-        mapping: Dict[Text, Any], domain: "Domain", active_loop_name: Text,
+        mapping: Dict[Text, Any],
+        domain: "Domain",
+        active_loop_name: Text,
     ) -> List[Text]:
         from rasa.shared.core.constants import ACTIVE_LOOP
 
@@ -99,7 +101,9 @@ def _get_active_loop_ignored_intents(
 
     @staticmethod
     def intent_is_desired(
-        mapping: Dict[Text, Any], tracker: "DialogueStateTracker", domain: "Domain",
+        mapping: Dict[Text, Any],
+        tracker: "DialogueStateTracker",
+        domain: "Domain",
     ) -> bool:
         """Checks whether user intent matches slot mapping intent specifications."""
         mapping_intents = SlotMapping.to_list(mapping.get(INTENT, []))
@@ -133,7 +137,8 @@ def to_list(x: Optional[Any]) -> List[Any]:
 
     @staticmethod
     def entity_is_desired(
-        mapping: Dict[Text, Any], tracker: "DialogueStateTracker",
+        mapping: Dict[Text, Any],
+        tracker: "DialogueStateTracker",
     ) -> bool:
         """Checks whether slot should be filled by an entity in the input or not.
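The `IntentPrediction` hunk above uses `TypedDict`'s functional form because its keys come from shared string constants rather than literal attribute names. A minimal, self-contained sketch of that pattern; the literal "name"/"confidence" keys here stand in for the INTENT_NAME_KEY / PREDICTED_CONFIDENCE_KEY constants and are not the library's actual definitions:

from typing import List

from typing_extensions import TypedDict  # plain `typing.TypedDict` on Python 3.8+

# Functional syntax, as in the hunk above: field names are given as dict keys.
IntentPrediction = TypedDict(
    "IntentPrediction",
    {
        "name": str,
        "confidence": float,
    },
)

def top_intent(ranking: List[IntentPrediction]) -> IntentPrediction:
    # Assumes a non-empty ranking; picks the highest-confidence entry.
    return max(ranking, key=lambda prediction: prediction["confidence"])

print(top_intent([{"name": "greet", "confidence": 0.96}, {"name": "deny", "confidence": 0.01}]))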
diff --git a/rasa/shared/core/training_data/loading.py b/rasa/shared/core/training_data/loading.py
index d3ddd397a2d1..1da85dc7e8da 100644
--- a/rasa/shared/core/training_data/loading.py
+++ b/rasa/shared/core/training_data/loading.py
@@ -15,7 +15,10 @@
 logger = logging.getLogger(__name__)
 
 
-def _get_reader(filename: Text, domain: Domain,) -> StoryReader:
+def _get_reader(
+    filename: Text,
+    domain: Domain,
+) -> StoryReader:
     if rasa.shared.data.is_likely_yaml_file(filename):
         return YAMLStoryReader(domain, filename)
     else:
@@ -24,7 +27,10 @@ def _get_reader(filename: Text, domain: Domain,) -> StoryReader:
         return _guess_reader(filename, domain)
 
 
-def _guess_reader(filename: Text, domain: Domain,) -> StoryReader:
+def _guess_reader(
+    filename: Text,
+    domain: Domain,
+) -> StoryReader:
     if YAMLStoryReader.is_stories_file(filename):
         return YAMLStoryReader(domain, filename)
 
@@ -36,7 +42,9 @@ def _guess_reader(filename: Text, domain: Domain,) -> StoryReader:
 
 
 def load_data_from_resource(
-    resource: Union[Text], domain: Domain, exclusion_percentage: Optional[int] = None,
+    resource: Union[Text],
+    domain: Domain,
+    exclusion_percentage: Optional[int] = None,
 ) -> List["StoryStep"]:
     """Loads core training data from the specified folder.
 
@@ -53,12 +61,16 @@ def load_data_from_resource(
         raise ValueError(f"Resource '{resource}' does not exist.")
 
     return load_data_from_files(
-        rasa.shared.utils.io.list_files(resource), domain, exclusion_percentage,
+        rasa.shared.utils.io.list_files(resource),
+        domain,
+        exclusion_percentage,
    )
 
 
 def load_data_from_files(
-    story_files: List[Text], domain: Domain, exclusion_percentage: Optional[int] = None,
+    story_files: List[Text],
+    domain: Domain,
+    exclusion_percentage: Optional[int] = None,
 ) -> List["StoryStep"]:
     """Loads core training data from the specified files.
 
diff --git a/rasa/shared/core/training_data/story_reader/story_reader.py b/rasa/shared/core/training_data/story_reader/story_reader.py
index abb571a27f01..8f1a98dc7298 100644
--- a/rasa/shared/core/training_data/story_reader/story_reader.py
+++ b/rasa/shared/core/training_data/story_reader/story_reader.py
@@ -17,7 +17,9 @@ class StoryReader:
     """Helper class to read a story file."""
 
     def __init__(
-        self, domain: Optional[Domain] = None, source_name: Optional[Text] = None,
+        self,
+        domain: Optional[Domain] = None,
+        source_name: Optional[Text] = None,
     ) -> None:
         """Constructor for the StoryReader.
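The loaders above thread an optional `exclusion_percentage` through to story loading. A hypothetical helper (not the library's implementation) showing what excluding a percentage of training stories means in practice:

from typing import List, Optional

def apply_exclusion(
    story_steps: List[str],
    exclusion_percentage: Optional[int] = None,
) -> List[str]:
    """Keeps only the first (100 - exclusion_percentage)% of the items."""
    if not exclusion_percentage:
        return story_steps
    fraction_to_keep = 1 - exclusion_percentage / 100
    return story_steps[: int(len(story_steps) * fraction_to_keep)]

print(apply_exclusion(["s1", "s2", "s3", "s4"], exclusion_percentage=50))  # ['s1', 's2']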
diff --git a/rasa/shared/core/training_data/story_reader/yaml_story_reader.py b/rasa/shared/core/training_data/story_reader/yaml_story_reader.py
index 76e3bdc17ecc..6d14e724020d 100644
--- a/rasa/shared/core/training_data/story_reader/yaml_story_reader.py
+++ b/rasa/shared/core/training_data/story_reader/yaml_story_reader.py
@@ -656,7 +656,10 @@ def unpack_regex_message(
             PREDICTED_CONFIDENCE_KEY: confidence,
         }
         intent_ranking = [
-            {INTENT_NAME_KEY: intent_name, PREDICTED_CONFIDENCE_KEY: confidence,}
+            {
+                INTENT_NAME_KEY: intent_name,
+                PREDICTED_CONFIDENCE_KEY: confidence,
+            }
         ]
         message_data = {}
         message_data[TEXT] = user_text
diff --git a/rasa/shared/core/training_data/visualization.py b/rasa/shared/core/training_data/visualization.py
index 99dd98b21ba1..3df2ff352f4f 100644
--- a/rasa/shared/core/training_data/visualization.py
+++ b/rasa/shared/core/training_data/visualization.py
@@ -256,7 +256,9 @@ def _merge_equivalent_nodes(graph: "networkx.MultiDiGraph", max_history: int) ->
 
 
 def _replace_edge_labels_with_nodes(
-    graph: "networkx.MultiDiGraph", next_id: int, nlu_training_data: "TrainingData",
+    graph: "networkx.MultiDiGraph",
+    next_id: int,
+    nlu_training_data: "TrainingData",
 ) -> None:
     """Replaces edge labels with nodes.
 
diff --git a/rasa/shared/data.py b/rasa/shared/data.py
index ffabb6923725..d33b799f729b 100644
--- a/rasa/shared/data.py
+++ b/rasa/shared/data.py
@@ -56,7 +56,9 @@ def get_core_directory(paths: Optional[Union[Text, List[Text]]]) -> Text:
     return _copy_files_to_new_dir(core_files)
 
 
-def get_nlu_directory(paths: Optional[Union[Text, List[Text]]],) -> Text:
+def get_nlu_directory(
+    paths: Optional[Union[Text, List[Text]]],
+) -> Text:
     """Recursively collects all NLU training files from a list of paths.
 
     Args:
diff --git a/rasa/shared/importers/importer.py b/rasa/shared/importers/importer.py
index 946ec0ff4e51..304cfbcc327d 100644
--- a/rasa/shared/importers/importer.py
+++ b/rasa/shared/importers/importer.py
@@ -29,7 +29,10 @@ def get_domain(self) -> Domain:
         """
         raise NotImplementedError()
 
-    def get_stories(self, exclusion_percentage: Optional[int] = None,) -> StoryGraph:
+    def get_stories(
+        self,
+        exclusion_percentage: Optional[int] = None,
+    ) -> StoryGraph:
         """Retrieves the stories that should be used for training.
 
         Args:
@@ -204,7 +207,10 @@ def get_domain(self) -> Domain:
         """Retrieves model domain (see parent class for full docstring)."""
         return Domain.empty()
 
-    def get_stories(self, exclusion_percentage: Optional[int] = None,) -> StoryGraph:
+    def get_stories(
+        self,
+        exclusion_percentage: Optional[int] = None,
+    ) -> StoryGraph:
         """Retrieves training stories / rules (see parent class for full docstring)."""
         return StoryGraph([])
 
@@ -248,7 +254,10 @@ def get_domain(self) -> Domain:
         )
 
     @rasa.shared.utils.common.cached_method
-    def get_stories(self, exclusion_percentage: Optional[int] = None,) -> StoryGraph:
+    def get_stories(
+        self,
+        exclusion_percentage: Optional[int] = None,
+    ) -> StoryGraph:
         """Retrieves training stories / rules (see parent class for full docstring)."""
         stories = [
             importer.get_stories(exclusion_percentage) for importer in self._importers
@@ -370,7 +379,10 @@ def _get_domain_with_retrieval_intents(
             {},
         )
 
-    def get_stories(self, exclusion_percentage: Optional[int] = None,) -> StoryGraph:
+    def get_stories(
+        self,
+        exclusion_percentage: Optional[int] = None,
+    ) -> StoryGraph:
         """Retrieves training stories / rules (see parent class for full docstring)."""
         return self._importer.get_stories(exclusion_percentage)
 
@@ -450,7 +462,10 @@ def _get_domain_with_e2e_actions(self) -> Domain:
             action_texts=additional_e2e_action_names,
         )
 
-    def get_stories(self, exclusion_percentage: Optional[int] = None,) -> StoryGraph:
+    def get_stories(
+        self,
+        exclusion_percentage: Optional[int] = None,
+    ) -> StoryGraph:
         """Retrieves the stories that should be used for training.
 
         See parent class for details.
diff --git a/rasa/shared/importers/multi_project.py b/rasa/shared/importers/multi_project.py
index 0ee50e9e82e9..b0820562cc3c 100644
--- a/rasa/shared/importers/multi_project.py
+++ b/rasa/shared/importers/multi_project.py
@@ -178,10 +178,15 @@ def get_domain(self) -> Domain:
             lambda merged, other: merged.merge(other), domains, Domain.empty()
         )
 
-    def get_stories(self, exclusion_percentage: Optional[int] = None,) -> StoryGraph:
+    def get_stories(
+        self,
+        exclusion_percentage: Optional[int] = None,
+    ) -> StoryGraph:
         """Retrieves training stories / rules (see parent class for full docstring)."""
         return utils.story_graph_from_paths(
-            self._story_paths, self.get_domain(), exclusion_percentage,
+            self._story_paths,
+            self.get_domain(),
+            exclusion_percentage,
         )
 
     def get_conversation_tests(self) -> StoryGraph:
diff --git a/rasa/shared/importers/rasa.py b/rasa/shared/importers/rasa.py
index ae2ca838fa36..af0ca9fb2e07 100644
--- a/rasa/shared/importers/rasa.py
+++ b/rasa/shared/importers/rasa.py
@@ -46,16 +46,22 @@ def get_config(self) -> Dict:
         """Retrieves model config (see parent class for full docstring)."""
         return self.config
 
-    def get_stories(self, exclusion_percentage: Optional[int] = None,) -> StoryGraph:
+    def get_stories(
+        self,
+        exclusion_percentage: Optional[int] = None,
+    ) -> StoryGraph:
         """Retrieves training stories / rules (see parent class for full docstring)."""
         return utils.story_graph_from_paths(
-            self._story_files, self.get_domain(), exclusion_percentage,
+            self._story_files,
+            self.get_domain(),
+            exclusion_percentage,
         )
 
     def get_conversation_tests(self) -> StoryGraph:
         """Retrieves conversation test stories (see parent class for full docstring)."""
         return utils.story_graph_from_paths(
-            self._conversation_test_files, self.get_domain(),
+            self._conversation_test_files,
+            self.get_domain(),
         )
 
     def get_nlu_data(self, language: Optional[Text] = "en") -> TrainingData:
diff --git a/rasa/shared/importers/utils.py b/rasa/shared/importers/utils.py
index cb6119b7eb51..a71032c2c8e6 100644
--- a/rasa/shared/importers/utils.py
+++ b/rasa/shared/importers/utils.py
@@ -13,7 +13,9 @@ def training_data_from_paths(paths: Iterable[Text], language: Text) -> TrainingD
 
 
 def story_graph_from_paths(
-    files: List[Text], domain: Domain, exclusion_percentage: Optional[int] = None,
+    files: List[Text],
+    domain: Domain,
+    exclusion_percentage: Optional[int] = None,
 ) -> StoryGraph:
     """Returns the `StoryGraph` from paths."""
     from rasa.shared.core.training_data import loading
diff --git a/rasa/shared/nlu/training_data/features.py b/rasa/shared/nlu/training_data/features.py
index c2266e2b8779..324120ef990d 100644
--- a/rasa/shared/nlu/training_data/features.py
+++ b/rasa/shared/nlu/training_data/features.py
@@ -205,7 +205,8 @@ def filter(
 
     @staticmethod
     def groupby_attribute(
-        features_list: List[Features], attributes: Optional[Iterable[Text]] = None,
+        features_list: List[Features],
+        attributes: Optional[Iterable[Text]] = None,
     ) -> Dict[Text, List[Features]]:
         """Groups the given features according to their attribute.
 
@@ -236,7 +237,8 @@ def groupby_attribute(
 
     @staticmethod
     def combine(
-        features_list: List[Features], expected_origins: Optional[List[Text]] = None,
+        features_list: List[Features],
+        expected_origins: Optional[List[Text]] = None,
     ) -> Features:
         """Combine features of the same type and level that describe the same attribute.
 
@@ -361,11 +363,14 @@ def reduce(
             # sequence feature that is (not) sparse before sentence feature that is
             # (not) sparse
             sublist = Features.filter(
-                features_list=features_list, type=type, is_sparse=is_sparse,
+                features_list=features_list,
+                type=type,
+                is_sparse=is_sparse,
             )
             if sublist:
                 combined_feature = Features.combine(
-                    sublist, expected_origins=expected_origins,
+                    sublist,
+                    expected_origins=expected_origins,
                 )
                 output.append(combined_feature)
         return output
diff --git a/rasa/shared/nlu/training_data/formats/dialogflow.py b/rasa/shared/nlu/training_data/formats/dialogflow.py
index 76d8ad8cf78c..115903444b30 100644
--- a/rasa/shared/nlu/training_data/formats/dialogflow.py
+++ b/rasa/shared/nlu/training_data/formats/dialogflow.py
@@ -130,10 +130,20 @@ def _read_entities(
         if entity["isRegexp"]:
             regex_features = DialogflowReader._extract_regex_features(entity, examples)
-            return TrainingData([], entity_synonyms, regex_features, [],)
+            return TrainingData(
+                [],
+                entity_synonyms,
+                regex_features,
+                [],
+            )
         else:
             lookup_tables = DialogflowReader._extract_lookup_tables(entity, examples)
-            return TrainingData([], entity_synonyms, [], lookup_tables,)
+            return TrainingData(
+                [],
+                entity_synonyms,
+                [],
+                lookup_tables,
+            )
 
     @staticmethod
     def _read_examples(
diff --git a/rasa/shared/utils/schemas/events.py b/rasa/shared/utils/schemas/events.py
index ba039a4eac94..7d3509de4919 100644
--- a/rasa/shared/utils/schemas/events.py
+++ b/rasa/shared/utils/schemas/events.py
@@ -18,7 +18,10 @@
 
 INTENT = {
     "type": "object",
-    "properties": {"name": {"type": "string"}, "confidence": {"type": "number"},},
+    "properties": {
+        "name": {"type": "string"},
+        "confidence": {"type": "number"},
+    },
 }
 
 RESPONSE_SCHEMA = {
diff --git a/rasa/telemetry.py b/rasa/telemetry.py
index 4a5377525ee1..09cce6e6fae3 100644
--- a/rasa/telemetry.py
+++ b/rasa/telemetry.py
@@ -1013,7 +1013,10 @@ def track_nlu_model_test(test_data: "TrainingData") -> None:
 
 @ensure_telemetry_enabled
 def track_markers_extraction_initiated(
-    strategy: Text, only_extract: bool, seed: bool, count: Optional[int],
+    strategy: Text,
+    only_extract: bool,
+    seed: bool,
+    count: Optional[int],
 ) -> None:
     """Track when a user tries to extract success markers.
 
@@ -1043,7 +1046,8 @@ def track_markers_extracted(trackers_count: int) -> None:
         trackers_count: The actual number of trackers processed
     """
     _track(
-        TELEMETRY_MARKERS_EXTRACTED_EVENT, {"trackers_count": trackers_count},
+        TELEMETRY_MARKERS_EXTRACTED_EVENT,
+        {"trackers_count": trackers_count},
     )
 
 
@@ -1055,7 +1059,8 @@ def track_markers_stats_computed(trackers_count: int) -> None:
         trackers_count: The actual number of trackers processed
     """
     _track(
-        TELEMETRY_MARKERS_STATS_COMPUTED_EVENT, {"trackers_count": trackers_count},
+        TELEMETRY_MARKERS_STATS_COMPUTED_EVENT,
+        {"trackers_count": trackers_count},
     )
 
 
diff --git a/rasa/utils/common.py b/rasa/utils/common.py
index 9974a5e81cc5..9f6980b1aa4e 100644
--- a/rasa/utils/common.py
+++ b/rasa/utils/common.py
@@ -43,7 +43,10 @@
         "Converting sparse IndexedSlices.* to a dense Tensor of unknown "
         "shape. This may consume a large amount of memory.",
     ),
-    (UserWarning, "Slot auto-fill has been removed in 3.0 .*",),
+    (
+        UserWarning,
+        "Slot auto-fill has been removed in 3.0 .*",
+    ),
 ]
 
 
@@ -213,7 +216,8 @@ def update_sanic_log_level(
         )
         socktype = SOCK_STREAM if syslog_protocol == TCP_PROTOCOL else SOCK_DGRAM
         syslog_handler = logging.handlers.SysLogHandler(
-            address=(syslog_address, syslog_port), socktype=socktype,
+            address=(syslog_address, syslog_port),
+            socktype=socktype,
         )
         syslog_handler.setFormatter(formatter)
         logger.addHandler(syslog_handler)
diff --git a/rasa/utils/plotting.py b/rasa/utils/plotting.py
index b7ba782f3e79..84f7c24970ad 100644
--- a/rasa/utils/plotting.py
+++ b/rasa/utils/plotting.py
@@ -221,7 +221,10 @@ def plot_paired_histogram(
     title: Text,
     output_file: Optional[Text] = None,
     num_bins: int = 25,
-    colors: Tuple[Text, Text] = ("#009292", "#920000",),  # (dark cyan, dark red)
+    colors: Tuple[Text, Text] = (
+        "#009292",
+        "#920000",
+    ),  # (dark cyan, dark red)
     axes_label: Tuple[Text, Text] = ("Correct", "Wrong"),
     frame_label: Tuple[Text, Text] = ("Number of Samples", "Confidence"),
     density: bool = False,
diff --git a/rasa/utils/tensorflow/layers.py b/rasa/utils/tensorflow/layers.py
index 87b168592c28..4b653cfd964f 100644
--- a/rasa/utils/tensorflow/layers.py
+++ b/rasa/utils/tensorflow/layers.py
@@ -1439,7 +1439,9 @@ def _sample_candidates(
         )
 
         pos_labels_embed = tf.expand_dims(
-            batch_labels_embed, axis=1, name="expand_pos_labels",
+            batch_labels_embed,
+            axis=1,
+            name="expand_pos_labels",
         )
 
         # Pick random examples from the batch
@@ -1457,7 +1459,9 @@ def _sample_candidates(
 
         # Get binary indicators of whether a candidate is positive or not
         pos_neg_indicators = self._get_pos_neg_indicators(
-            all_labels_ids, batch_labels_ids, candidate_ids,
+            all_labels_ids,
+            batch_labels_ids,
+            candidate_ids,
         )
 
         return (
diff --git a/rasa/utils/tensorflow/layers_utils.py b/rasa/utils/tensorflow/layers_utils.py
index 7d97eb997c69..4a62f0a6d6d8 100644
--- a/rasa/utils/tensorflow/layers_utils.py
+++ b/rasa/utils/tensorflow/layers_utils.py
@@ -64,7 +64,8 @@ def get_candidate_values(
         ```
     """
     tiled_x = tf.tile(
-        tf.expand_dims(batch_flatten(x), 0), (tf.shape(candidate_ids)[0], 1, 1),
+        tf.expand_dims(batch_flatten(x), 0),
+        (tf.shape(candidate_ids)[0], 1, 1),
     )
     candidate_values = tf.gather(tiled_x, candidate_ids, batch_dims=1)
 
diff --git a/rasa/utils/tensorflow/models.py b/rasa/utils/tensorflow/models.py
index f1575b593581..ec82ee4e3f6c 100644
--- a/rasa/utils/tensorflow/models.py
+++ b/rasa/utils/tensorflow/models.py
@@ -304,7 +304,10 @@ def run_inference(
         """
         outputs = {}
         (data_generator, _,) = rasa.utils.train_utils.create_data_generators(
-            model_data=model_data, batch_sizes=batch_size, epochs=1, shuffle=False,
+            model_data=model_data,
+            batch_sizes=batch_size,
+            epochs=1,
+            shuffle=False,
         )
         data_iterator = iter(data_generator)
         while True:
@@ -555,7 +558,8 @@ def __init__(
         label_data: RasaModelData,
     ) -> None:
         super().__init__(
-            name=name, random_seed=config[RANDOM_SEED],
+            name=name,
+            random_seed=config[RANDOM_SEED],
         )
 
         self.config = config
@@ -765,7 +769,10 @@ def _prepare_ffnn_layer(
         )
 
     def _prepare_dot_product_loss(
-        self, name: Text, scale_loss: bool, prefix: Text = "loss",
+        self,
+        name: Text,
+        scale_loss: bool,
+        prefix: Text = "loss",
     ) -> None:
         self._tf_layers[f"{prefix}.{name}"] = self.dot_product_loss_layer(
             self.config[NUM_NEG],
@@ -840,7 +847,9 @@ def _get_sequence_feature_lengths(
         return tf.zeros([batch_dim], dtype=tf.int32)
 
     def _get_sentence_feature_lengths(
-        self, tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]], key: Text,
+        self,
+        tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],
+        key: Text,
     ) -> tf.Tensor:
         """Fetches the sequence lengths of sentence-level features per input example.
 
diff --git a/rasa/utils/tensorflow/rasa_layers.py b/rasa/utils/tensorflow/rasa_layers.py
index 149aeff474d7..6730b72a28bc 100644
--- a/rasa/utils/tensorflow/rasa_layers.py
+++ b/rasa/utils/tensorflow/rasa_layers.py
@@ -253,7 +253,10 @@ def _check_sparse_input_units(
         )
 
     def _prepare_layers_for_sparse_tensors(
-        self, attribute: Text, feature_type: Text, config: Dict[Text, Any],
+        self,
+        attribute: Text,
+        feature_type: Text,
+        config: Dict[Text, Any],
     ) -> None:
         """Sets up sparse tensor pre-processing before combining with dense ones."""
         # For optionally applying dropout to sparse tensors
diff --git a/rasa/utils/train_utils.py b/rasa/utils/train_utils.py
index 7e7c6d233876..06da1e941e8f 100644
--- a/rasa/utils/train_utils.py
+++ b/rasa/utils/train_utils.py
@@ -323,7 +323,8 @@ def create_data_generators(
     validation_data_generator = None
     if eval_num_examples > 0:
         model_data, evaluation_model_data = model_data.split(
-            eval_num_examples, random_seed,
+            eval_num_examples,
+            random_seed,
         )
         validation_data_generator = RasaBatchDataGenerator(
             evaluation_model_data,
diff --git a/rasa/validator.py b/rasa/validator.py
index 06d035070446..8432e3389686 100644
--- a/rasa/validator.py
+++ b/rasa/validator.py
@@ -295,7 +295,9 @@ def verify_story_structure(
 
         # Create a list of `StoryConflict` objects
        conflicts = rasa.core.training.story_conflict.find_story_conflicts(
-            trackers, self.domain, max_history,
+            trackers,
+            self.domain,
+            max_history,
        )
 
         if not conflicts:
diff --git a/tests/cli/test_rasa_evaluate_markers.py b/tests/cli/test_rasa_evaluate_markers.py
index 7abf506725d5..8d3fdc894ed2 100644
--- a/tests/cli/test_rasa_evaluate_markers.py
+++ b/tests/cli/test_rasa_evaluate_markers.py
@@ -114,7 +114,9 @@ def test_markers_cli_results_save_correctly(
 
     endpoints_path = write_endpoint_config_to_yaml(
         tmp_path,
-        {"tracker_store": {"type": "sql", "db": db_path.replace("\\", "\\\\")},},
+        {
+            "tracker_store": {"type": "sql", "db": db_path.replace("\\", "\\\\")},
+        },
     )
 
     markers_path = write_markers_config_to_yaml(
diff --git a/tests/cli/test_rasa_init.py b/tests/cli/test_rasa_init.py
index 9647937a6598..9a3d2b796d17 100644
--- a/tests/cli/test_rasa_init.py
+++ b/tests/cli/test_rasa_init.py
@@ -77,7 +77,12 @@ def test_train_data_in_project_dir(monkeypatch: MonkeyPatch, tmp_path: Path):
     subparsers = parser.add_subparsers()
     scaffold.add_subparser(subparsers, parents=[])
 
-    args = parser.parse_args(["init", "--no-prompt",])
+    args = parser.parse_args(
+        [
+            "init",
+            "--no-prompt",
+        ]
+    )
 
     # Simple config which should train fast.
     def mock_get_config(*args):
diff --git a/tests/cli/test_rasa_test.py b/tests/cli/test_rasa_test.py
index ddafedfa4728..50ea742c2119 100644
--- a/tests/cli/test_rasa_test.py
+++ b/tests/cli/test_rasa_test.py
@@ -100,7 +100,10 @@ def test_test_with_no_user_utterance(
     run_in_simple_project_with_model: Callable[..., RunResult]
 ):
     write_yaml(
-        {"pipeline": "KeywordIntentClassifier", "policies": [{"name": "TEDPolicy"}],},
+        {
+            "pipeline": "KeywordIntentClassifier",
+            "policies": [{"name": "TEDPolicy"}],
+        },
         "config.yml",
     )
 
@@ -161,7 +164,12 @@ def test_test_nlu_cross_validation_with_autoconfig(
     nlu_path = str(testdir.tmpdir / "nlu.yml")
     shutil.copy(str(moodbot_nlu_data_path), nlu_path)
     write_yaml(
-        {"language": "en", "pipeline": None, "policies": None,}, config_path,
+        {
+            "language": "en",
+            "pipeline": None,
+            "policies": None,
+        },
+        config_path,
     )
     args = [
         shutil.which("rasa"),
diff --git a/tests/cli/test_rasa_train.py b/tests/cli/test_rasa_train.py
index e637f65ae4d9..73bc680ecb9f 100644
--- a/tests/cli/test_rasa_train.py
+++ b/tests/cli/test_rasa_train.py
@@ -70,7 +70,8 @@ def test_train_finetune(
     run_in_simple_project: Callable[..., RunResult], capsys: CaptureFixture
 ):
     run_in_simple_project(
-        "train", "--finetune",
+        "train",
+        "--finetune",
     )
 
     output = capsys.readouterr().out
diff --git a/tests/cli/test_rasa_x.py b/tests/cli/test_rasa_x.py
index f3694e62658b..9d54a20da201 100644
--- a/tests/cli/test_rasa_x.py
+++ b/tests/cli/test_rasa_x.py
@@ -161,7 +161,8 @@ async def test_pull_runtime_config_from_server():
 
 
 def test_rasa_x_raises_warning_above_version_3(
-    monkeypatch: MonkeyPatch, run: Callable[..., RunResult],
+    monkeypatch: MonkeyPatch,
+    run: Callable[..., RunResult],
 ):
     def mock_run_locally(args):
         return None
diff --git a/tests/conftest.py b/tests/conftest.py
index 8de3d38243cd..36db32b79af2 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -203,7 +203,9 @@ async def trained_default_agent_model(
     config_path = project_path / "config.yml"
     rasa.shared.utils.io.write_text_file(config, config_path)
     model_path = await trained_async(
-        domain_path, str(config_path), [stories_path, nlu_data_path],
+        domain_path,
+        str(config_path),
+        [stories_path, nlu_data_path],
     )
     return model_path
 
@@ -379,7 +381,9 @@ async def trained_core_model(
     stories_path: Text,
 ) -> Text:
     trained_core_model_path = await trained_async(
-        domain=domain_path, config=stack_config_path, training_files=[stories_path],
+        domain=domain_path,
+        config=stack_config_path,
+        training_files=[stories_path],
     )
     return trained_core_model_path
 
@@ -393,7 +397,9 @@ async def trained_nlu_model(
     stack_config_path: Text,
 ) -> Text:
     trained_nlu_model_path = await trained_async(
-        domain=domain_path, config=stack_config_path, training_files=[nlu_data_path],
+        domain=domain_path,
+        config=stack_config_path,
+        training_files=[nlu_data_path],
     )
     return trained_nlu_model_path
 
@@ -589,7 +595,9 @@ async def e2e_bot(
 
 @pytest.fixture(scope="module")
-async def response_selector_agent(trained_response_selector_bot: Path,) -> Agent:
+async def response_selector_agent(
+    trained_response_selector_bot: Path,
+) -> Agent:
     return await load_agent(str(trained_response_selector_bot))
 
diff --git a/tests/core/actions/test_forms.py b/tests/core/actions/test_forms.py
index 961e713d6460..a7c1a5b59d96 100644
--- a/tests/core/actions/test_forms.py
+++ b/tests/core/actions/test_forms.py
@@ -196,7 +196,8 @@ async def test_switch_forms_with_same_slot(default_agent: Agent):
         DefinePrevUserUtteredFeaturization(False),
     ]
     tracker.update_with_events(
-        next_events, domain,
+        next_events,
+        domain,
     )
     events_expected.extend(next_events)
 
@@ -394,13 +395,18 @@ async def test_action_rejection():
        # Validate function says slot is invalid
         (
             [{"event": "slot", "name": "num_people", "value": None}],
-            [SlotSet("num_people", None), SlotSet(REQUESTED_SLOT, "num_people"),],
+            [
+                SlotSet("num_people", None),
+                SlotSet(REQUESTED_SLOT, "num_people"),
+            ],
         ),
         # Validate function decides to request a slot which is not part of the default
         # slot mapping
         (
             [{"event": "slot", "name": "requested_slot", "value": "is_outside"}],
-            [SlotSet(REQUESTED_SLOT, "is_outside"),],
+            [
+                SlotSet(REQUESTED_SLOT, "is_outside"),
+            ],
         ),
         # Validate function decides that no more slots should be requested
         (
@@ -431,7 +437,9 @@ async def test_action_rejection():
         # User rejected manually
         (
             [{"event": "action_execution_rejected", "name": "my form"}],
-            [ActionExecutionRejected("my form"),],
+            [
+                ActionExecutionRejected("my form"),
+            ],
         ),
     ],
 )
@@ -542,7 +550,10 @@ async def test_request_correct_slots_after_unhappy_path_with_custom_required_slo
             ActiveLoop(form_name),
             SlotSet(REQUESTED_SLOT, "slot_2"),
             ActionExecuted(ACTION_LISTEN_NAME),
-            UserUttered("hello", intent={"name": "greet", "confidence": 1.0},),
+            UserUttered(
+                "hello",
+                intent={"name": "greet", "confidence": 1.0},
+            ),
             ActionExecutionRejected(form_name),
             ActionExecuted("utter_greet"),
         ],
@@ -692,7 +703,8 @@ async def test_validate_slots_on_activation_with_other_action_after_user_utteran
 
 
 @pytest.mark.parametrize(
-    "utterance_name", ["utter_ask_my_form_num_people", "utter_ask_num_people"],
+    "utterance_name",
+    ["utter_ask_my_form_num_people", "utter_ask_num_people"],
 )
 def test_name_of_utterance(utterance_name: Text):
     form_name = "my_form"
@@ -967,7 +979,10 @@ async def test_extract_other_slots_with_entity(
                     "type": "any",
                     "mappings": some_other_slot_mapping,
                 },
-                "some_slot": {"type": "any", "mappings": some_slot_mapping,},
+                "some_slot": {
+                    "type": "any",
+                    "mappings": some_slot_mapping,
+                },
             },
             "forms": {
                 form_name: {REQUIRED_SLOTS_KEY: ["some_other_slot", "some_slot"]}
@@ -1126,7 +1141,8 @@ async def test_ask_for_slot_if_not_utter_ask(
     ],
 )
 async def test_ignored_intents_with_slot_type_from_entity(
-    ignored_intents: Union[Text, List[Text]], slot_not_intent: Union[Text, List[Text]],
+    ignored_intents: Union[Text, List[Text]],
+    slot_not_intent: Union[Text, List[Text]],
 ):
     form_name = "some_form"
     entity_name = "some_slot"
@@ -1219,7 +1235,8 @@ async def test_ignored_intents_with_slot_type_from_entity(
     ],
 )
 async def test_ignored_intents_with_slot_type_from_text(
-    ignored_intents: Union[Text, List[Text]], slot_not_intent: Union[Text, List[Text]],
+    ignored_intents: Union[Text, List[Text]],
+    slot_not_intent: Union[Text, List[Text]],
 ):
     form_name = "some_form"
     entity_name = "some_slot"
@@ -1453,7 +1470,9 @@ async def test_extract_other_slots_with_matched_mapping_conditions():
             UserUttered(
                 "My name is Emily.",
                 intent={"name": "inform", "confidence": 1.0},
-                entities=[{"entity": "name", "value": "Emily"},],
+                entities=[
+                    {"entity": "name", "value": "Emily"},
+                ],
             ),
             ActionExecuted(ACTION_LISTEN_NAME),
         ],
@@ -1525,7 +1544,9 @@ async def test_extract_other_slots_raises_no_matched_conditions():
             UserUttered(
                 "My name is Emily.",
                 intent={"name": "inform", "confidence": 1.0},
-                entities=[{"entity": "name", "value": "Emily"},],
+                entities=[
+                    {"entity": "name", "value": "Emily"},
+                ],
             ),
             ActionExecuted(ACTION_LISTEN_NAME),
         ],
@@ -1587,7 +1608,9 @@ async def test_action_extract_slots_custom_mapping_with_condition():
         mocked.post(
             action_server_url,
             payload={
-                "events": [{"event": "slot", "name": "custom_slot", "value": "test"},]
+                "events": [
+                    {"event": "slot", "name": "custom_slot", "value": "test"},
+                ]
             },
         )
 
@@ -1651,7 +1674,9 @@ async def test_form_slots_empty_with_restart():
             UserUttered(
                 "My name is Emily.",
                 intent={"name": "inform", "confidence": 1.0},
-                entities=[{"entity": "name", "value": "Emily"},],
+                entities=[
+                    {"entity": "name", "value": "Emily"},
+                ],
             ),
             SlotSet("name", "emily"),
             Restarted(),
diff --git a/tests/core/conftest.py b/tests/core/conftest.py
index 705acab78152..61a954dee76f 100644
--- a/tests/core/conftest.py
+++ b/tests/core/conftest.py
@@ -35,7 +35,10 @@ def as_feature(self):
 class MockedMongoTrackerStore(MongoTrackerStore):
     """In-memory mocked version of `MongoTrackerStore`."""
 
-    def __init__(self, _domain: Domain,) -> None:
+    def __init__(
+        self,
+        _domain: Domain,
+    ) -> None:
         from mongomock import MongoClient
 
         self.db = MongoClient().rasa
diff --git a/tests/core/featurizers/test_precomputation.py b/tests/core/featurizers/test_precomputation.py
index 722c5b25efe8..e65ceff1bbc4 100644
--- a/tests/core/featurizers/test_precomputation.py
+++ b/tests/core/featurizers/test_precomputation.py
@@ -363,7 +363,8 @@ def test_container_derive_messages_from_domain_and_add():
 
 @pytest.fixture
 def input_converter(
-    default_model_storage: ModelStorage, default_execution_context: ExecutionContext,
+    default_model_storage: ModelStorage,
+    default_execution_context: ExecutionContext,
 ):
     return CoreFeaturizationInputConverter.create(
         CoreFeaturizationInputConverter.get_default_config(),
@@ -471,7 +472,8 @@ def test_converter_for_inference(input_converter: CoreFeaturizationInputConverte
 
 @pytest.fixture
 def collector(
-    default_model_storage: ModelStorage, default_execution_context: ExecutionContext,
+    default_model_storage: ModelStorage,
+    default_execution_context: ExecutionContext,
 ):
     return CoreFeaturizationCollector.create(
         CoreFeaturizationCollector.get_default_config(),
diff --git a/tests/core/featurizers/test_tracker_featurizer.py b/tests/core/featurizers/test_tracker_featurizer.py
index fe29d1fec3c8..0682d9d55cb9 100644
--- a/tests/core/featurizers/test_tracker_featurizer.py
+++ b/tests/core/featurizers/test_tracker_featurizer.py
@@ -156,7 +156,9 @@ def test_featurize_trackers_with_full_dialogue_tracker_featurizer(
     tracker_featurizer = FullDialogueTrackerFeaturizer(state_featurizer)
 
     actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
-        [moodbot_tracker], moodbot_domain, precomputations=None,
+        [moodbot_tracker],
+        moodbot_domain,
+        precomputations=None,
     )
 
     expected_features = [
@@ -197,7 +199,8 @@ def test_featurize_trackers_with_full_dialogue_tracker_featurizer(
 
 
 def test_trackers_ignore_action_unlikely_intent_with_full_dialogue_tracker_featurizer(
-    moodbot_domain: Domain, moodbot_features: Dict[Text, Dict[Text, Features]],
+    moodbot_domain: Domain,
+    moodbot_features: Dict[Text, Dict[Text, Features]],
 ):
     tracker = DialogueStateTracker.from_events(
@@ -267,7 +270,8 @@ def test_trackers_ignore_action_unlikely_intent_with_full_dialogue_tracker_featu
 
 
 def test_trackers_keep_action_unlikely_intent_with_full_dialogue_tracker_featurizer(
-    moodbot_domain: Domain, moodbot_features: Dict[Text, Dict[Text, Features]],
+    moodbot_domain: Domain,
+    moodbot_features: Dict[Text, Dict[Text, Features]],
 ):
     tracker = DialogueStateTracker.from_events(
@@ -293,7 +297,9 @@ def test_trackers_keep_action_unlikely_intent_with_full_dialogue_tracker_featuri
     tracker_featurizer = FullDialogueTrackerFeaturizer(state_featurizer)
 
     actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
-        [tracker], moodbot_domain, precomputations=None,
+        [tracker],
+        moodbot_domain,
+        precomputations=None,
     )
 
     expected_features = [
@@ -345,7 +351,9 @@ def test_create_state_features_full_dialogue_tracker_featurizer(
     tracker_featurizer = FullDialogueTrackerFeaturizer(state_featurizer)
     state_featurizer.prepare_for_training(moodbot_domain)
     actual_features = tracker_featurizer.create_state_features(
-        [moodbot_tracker], moodbot_domain, precomputations=None,
+        [moodbot_tracker],
+        moodbot_domain,
+        precomputations=None,
     )
 
     expected_features = [
@@ -378,7 +386,8 @@ def test_create_state_features_full_dialogue_tracker_featurizer(
 
 
 def test_state_features_ignore_action_unlikely_intent_full_dialogue_tracker_featurizer(
-    moodbot_domain: Domain, moodbot_features: Dict[Text, Dict[Text, Features]],
+    moodbot_domain: Domain,
+    moodbot_features: Dict[Text, Dict[Text, Features]],
 ):
     tracker = DialogueStateTracker.from_events(
         "default",
@@ -435,7 +444,8 @@ def test_state_features_ignore_action_unlikely_intent_full_dialogue_tracker_feat
 
 
 def test_state_features_keep_action_unlikely_intent_full_dialogue_tracker_featurizer(
-    moodbot_domain: Domain, moodbot_features: Dict[Text, Dict[Text, Features]],
+    moodbot_domain: Domain,
+    moodbot_features: Dict[Text, Dict[Text, Features]],
 ):
     tracker = DialogueStateTracker.from_events(
         "default",
@@ -458,7 +468,9 @@ def test_state_features_keep_action_unlikely_intent_full_dialogue_tracker_featur
     tracker_featurizer = FullDialogueTrackerFeaturizer(state_featurizer)
     state_featurizer.prepare_for_training(moodbot_domain)
     actual_features = tracker_featurizer.create_state_features(
-        [tracker], moodbot_domain, precomputations=None,
+        [tracker],
+        moodbot_domain,
+        precomputations=None,
     )
 
     expected_features = [
@@ -496,7 +508,8 @@ def test_prediction_states_with_full_dialogue_tracker_featurizer(
     state_featurizer = SingleStateFeaturizer()
     tracker_featurizer = FullDialogueTrackerFeaturizer(state_featurizer)
     actual_states = tracker_featurizer.prediction_states(
-        [moodbot_tracker], moodbot_domain,
+        [moodbot_tracker],
+        moodbot_domain,
     )
 
     expected_states = [
@@ -506,7 +519,10 @@ def test_prediction_states_with_full_dialogue_tracker_featurizer(
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                 USER: {INTENT: "greet"},
             },
-            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},},
+            {
+                USER: {INTENT: "greet"},
+                PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},
+            },
             {
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                 USER: {INTENT: "mood_unhappy"},
@@ -523,7 +539,10 @@ def test_prediction_states_with_full_dialogue_tracker_featurizer(
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                 USER: {INTENT: "deny"},
             },
-            {USER: {INTENT: "deny"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_goodbye"},},
+            {
+                USER: {INTENT: "deny"},
+                PREVIOUS_ACTION: {ACTION_NAME: "utter_goodbye"},
+            },
         ]
     ]
 
@@ -553,7 +572,9 @@ def test_prediction_states_hide_rule_states_with_full_dialogue_tracker_featurize
     )
 
     actual_states = tracker_featurizer.prediction_states(
-        [rule_tracker], moodbot_domain, ignore_rule_only_turns=True,
+        [rule_tracker],
+        moodbot_domain,
+        ignore_rule_only_turns=True,
     )
 
     expected_states = [
@@ -587,7 +608,9 @@ def test_prediction_states_hide_rule_states_with_full_dialogue_tracker_featurize
     )
 
     actual_states = tracker_featurizer.prediction_states(
-        [embedded_rule_tracker], moodbot_domain, ignore_rule_only_turns=True,
+        [embedded_rule_tracker],
+        moodbot_domain,
+        ignore_rule_only_turns=True,
     )
 
     expected_states = [
@@ -650,7 +673,10 @@ def test_prediction_states_ignore_action_intent_unlikely_full_dialogue_featurize
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                 USER: {INTENT: "greet"},
             },
-            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},},
+            {
+                USER: {INTENT: "greet"},
+                PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},
+            },
             {
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                 USER: {INTENT: "mood_great"},
@@ -697,7 +723,10 @@ def test_prediction_states_keeps_action_intent_unlikely_full_dialogue_featurizer
         domain=moodbot_domain,
     )
 
-    actual_states = tracker_featurizer.prediction_states([tracker], moodbot_domain,)
+    actual_states = tracker_featurizer.prediction_states(
+        [tracker],
+        moodbot_domain,
+    )
 
     expected_states = [
         [
@@ -710,7 +739,10 @@ def test_prediction_states_keeps_action_intent_unlikely_full_dialogue_featurizer
                 USER: {INTENT: "greet"},
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_UNLIKELY_INTENT_NAME},
             },
-            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},},
+            {
+                USER: {INTENT: "greet"},
+                PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},
+            },
             {
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                 USER: {INTENT: "mood_great"},
@@ -750,11 +782,15 @@ def test_featurize_trackers_with_max_history_tracker_featurizer(
     )
 
     actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
-        [moodbot_tracker], moodbot_domain, precomputations=None,
+        [moodbot_tracker],
+        moodbot_domain,
+        precomputations=None,
     )
 
     expected_features = [
-        [{},],
+        [
+            {},
+        ],
         [
             {},
             {
@@ -867,7 +903,8 @@ def test_featurize_trackers_ignore_action_unlikely_intent_max_history_featurizer
     )
     state_featurizer = SingleStateFeaturizer()
     tracker_featurizer = MaxHistoryTrackerFeaturizer(
-        state_featurizer, max_history=max_history,
+        state_featurizer,
+        max_history=max_history,
     )
 
     actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
@@ -878,7 +915,9 @@ def test_featurize_trackers_ignore_action_unlikely_intent_max_history_featurizer
     )
 
     expected_features = [
-        [{},],
+        [
+            {},
+        ],
         [
             {},
             {
@@ -933,15 +972,20 @@ def test_featurize_trackers_keep_action_unlikely_intent_max_history_featurizer(
     )
     state_featurizer = SingleStateFeaturizer()
     tracker_featurizer = MaxHistoryTrackerFeaturizer(
-        state_featurizer, max_history=max_history,
+        state_featurizer,
+        max_history=max_history,
     )
 
     actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
-        [tracker], moodbot_domain, precomputations=None,
+        [tracker],
+        moodbot_domain,
+        precomputations=None,
     )
 
     expected_features = [
-        [{},],
+        [
+            {},
+        ],
         [
             {},
             {
@@ -988,7 +1032,12 @@ def test_featurize_trackers_keep_action_unlikely_intent_max_history_featurizer(
 
 @pytest.mark.parametrize(
     "remove_duplicates,max_history",
-    [[True, None], [True, 2], [False, None], [False, 2],],
+    [
+        [True, None],
+        [True, 2],
+        [False, None],
+        [False, 2],
+    ],
 )
 def test_deduplicate_featurize_trackers_with_max_history_tracker_featurizer(
     moodbot_tracker: DialogueStateTracker,
@@ -1004,11 +1053,15 @@ def test_deduplicate_featurize_trackers_with_max_history_tracker_featurizer(
 
     # Add Duplicate moodbot_tracker states should get removed.
     actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
-        [moodbot_tracker, moodbot_tracker], moodbot_domain, precomputations=None,
+        [moodbot_tracker, moodbot_tracker],
+        moodbot_domain,
+        precomputations=None,
     )
 
     expected_features = [
-        [{},],
+        [
+            {},
+        ],
         [
             {},
             {
@@ -1118,7 +1171,9 @@ def test_create_state_features_with_max_history_tracker_featurizer(
     )
     state_featurizer.prepare_for_training(moodbot_domain)
     actual_features = tracker_featurizer.create_state_features(
-        [moodbot_tracker], moodbot_domain, precomputations=None,
+        [moodbot_tracker],
+        moodbot_domain,
+        precomputations=None,
     )
 
     expected_features = [
@@ -1245,7 +1300,9 @@ def test_create_state_features_keep_action_unlikely_intent_max_history_featurize
     )
     state_featurizer.prepare_for_training(moodbot_domain)
     actual_features = tracker_featurizer.create_state_features(
-        [tracker], moodbot_domain, precomputations=None,
+        [tracker],
+        moodbot_domain,
+        precomputations=None,
     )
 
     expected_features = [
@@ -1291,7 +1348,8 @@ def test_prediction_states_with_max_history_tracker_featurizer(
         state_featurizer, max_history=max_history
     )
     actual_states = tracker_featurizer.prediction_states(
-        [moodbot_tracker], moodbot_domain,
+        [moodbot_tracker],
+        moodbot_domain,
    )
 
     expected_states = [
@@ -1301,7 +1359,10 @@ def test_prediction_states_with_max_history_tracker_featurizer(
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                 USER: {INTENT: "greet"},
             },
-            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},},
+            {
+                USER: {INTENT: "greet"},
+                PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},
+            },
             {
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                 USER: {INTENT: "mood_unhappy"},
@@ -1318,7 +1379,10 @@ def test_prediction_states_with_max_history_tracker_featurizer(
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                 USER: {INTENT: "deny"},
             },
-            {USER: {INTENT: "deny"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_goodbye"},},
+            {
+                USER: {INTENT: "deny"},
+                PREVIOUS_ACTION: {ACTION_NAME: "utter_goodbye"},
+            },
         ]
     ]
     if max_history is not None:
@@ -1355,7 +1419,9 @@ def test_prediction_states_hide_rule_states_with_max_history_tracker_featurizer(
     )
 
     actual_states = tracker_featurizer.prediction_states(
-        [rule_tracker], moodbot_domain, ignore_rule_only_turns=True,
+        [rule_tracker],
+        moodbot_domain,
+        ignore_rule_only_turns=True,
     )
 
     expected_states = [
@@ -1389,7 +1455,9 @@ def test_prediction_states_hide_rule_states_with_max_history_tracker_featurizer(
     )
 
     actual_states = tracker_featurizer.prediction_states(
-        [embedded_rule_tracker], moodbot_domain, ignore_rule_only_turns=True,
+        [embedded_rule_tracker],
+        moodbot_domain,
+        ignore_rule_only_turns=True,
     )
 
     expected_states = [
@@ -1460,7 +1528,10 @@ def test_prediction_states_ignores_action_intent_unlikely_max_history_featurizer
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                 USER: {INTENT: "greet"},
             },
-            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},},
+            {
+                USER: {INTENT: "greet"},
+                PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},
+            },
             {
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                 USER: {INTENT: "mood_great"},
@@ -1515,7 +1586,10 @@ def test_prediction_states_keeps_action_intent_unlikely_max_history_featurizer(
         domain=moodbot_domain,
     )
 
-    actual_states = tracker_featurizer.prediction_states([tracker], moodbot_domain,)
+    actual_states = tracker_featurizer.prediction_states(
+        [tracker],
+        moodbot_domain,
+    )
 
     expected_states = [
         [
@@ -1528,7 +1602,10 @@ def test_prediction_states_keeps_action_intent_unlikely_max_history_featurizer(
                 USER: {INTENT: "greet"},
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_UNLIKELY_INTENT_NAME},
             },
-            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},},
+            {
+                USER: {INTENT: "greet"},
+                PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},
+            },
             {
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                 USER: {INTENT: "mood_great"},
@@ -1578,11 +1655,15 @@ def test_featurize_trackers_with_intent_max_history_tracker_featurizer(
     )
 
     actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
-        [moodbot_tracker], moodbot_domain, precomputations=None,
+        [moodbot_tracker],
+        moodbot_domain,
+        precomputations=None,
     )
 
     expected_features = [
-        [{},],
+        [
+            {},
+        ],
         [
             {},
             {
@@ -1652,7 +1733,8 @@ def test_trackers_ignore_action_unlikely_intent_intent_max_history_featurizer(
     )
     state_featurizer = IntentTokenizerSingleStateFeaturizer()
     tracker_featurizer = IntentMaxHistoryTrackerFeaturizer(
-        state_featurizer, max_history=max_history,
+        state_featurizer,
+        max_history=max_history,
     )
 
     actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
@@ -1663,7 +1745,9 @@ def test_trackers_ignore_action_unlikely_intent_intent_max_history_featurizer(
     )
 
     expected_features = [
-        [{},],
+        [
+            {},
+        ],
         [
             {},
             {
@@ -1719,15 +1803,20 @@ def test_trackers_keep_action_unlikely_intent_intent_max_history_featurizer(
     )
     state_featurizer = IntentTokenizerSingleStateFeaturizer()
     tracker_featurizer = IntentMaxHistoryTrackerFeaturizer(
-        state_featurizer, max_history=max_history,
+        state_featurizer,
+        max_history=max_history,
     )
 
     actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
-        [tracker], moodbot_domain, precomputations=None,
+        [tracker],
+        moodbot_domain,
+        precomputations=None,
     )
 
     expected_features = [
-        [{},],
+        [
+            {},
+        ],
         [
             {},
             {
@@ -1782,11 +1871,15 @@ def test_deduplicate_featurize_trackers_with_intent_max_history_tracker_featuriz
 
     # Add Duplicate moodbot_tracker states should get removed.
     actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
-        [moodbot_tracker, moodbot_tracker], moodbot_domain, precomputations=None,
+        [moodbot_tracker, moodbot_tracker],
+        moodbot_domain,
+        precomputations=None,
     )
 
     expected_features = [
-        [{},],
+        [
+            {},
+        ],
         [
             {},
             {
@@ -1862,7 +1955,9 @@ def test_create_state_features_with_intent_max_history_tracker_featurizer(
     )
     state_featurizer.prepare_for_training(moodbot_domain)
     actual_features = tracker_featurizer.create_state_features(
-        [moodbot_tracker], moodbot_domain, precomputations=None,
+        [moodbot_tracker],
+        moodbot_domain,
+        precomputations=None,
     )
 
     expected_features = [
@@ -1994,7 +2089,9 @@ def test_state_features_keep_action_unlikely_intent_intent_max_history_featurize
     )
     state_featurizer.prepare_for_training(moodbot_domain)
     actual_features = tracker_featurizer.create_state_features(
-        [tracker], moodbot_domain, precomputations=None,
+        [tracker],
+        moodbot_domain,
+        precomputations=None,
     )
 
     expected_features = [
@@ -2044,7 +2141,8 @@ def test_prediction_states_with_intent_max_history_tracker_featurizer(
         state_featurizer, max_history=max_history
     )
     actual_states = tracker_featurizer.prediction_states(
-        [moodbot_tracker], moodbot_domain,
+        [moodbot_tracker],
+        moodbot_domain,
     )
 
     expected_states = [
@@ -2054,7 +2152,10 @@ def test_prediction_states_with_intent_max_history_tracker_featurizer(
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                 USER: {INTENT: "greet"},
             },
-            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},},
+            {
+                USER: {INTENT: "greet"},
+                PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},
+            },
             {
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                 USER: {INTENT: "mood_unhappy"},
@@ -2103,7 +2204,9 @@ def test_prediction_states_hide_rule_states_intent_max_history_featurizer(
     )
 
     actual_states = tracker_featurizer.prediction_states(
-        [rule_tracker], moodbot_domain, ignore_rule_only_turns=True,
+        [rule_tracker],
+        moodbot_domain,
+        ignore_rule_only_turns=True,
     )
 
     expected_states = [[{}]]
@@ -2127,10 +2230,16 @@ def test_prediction_states_hide_rule_states_intent_max_history_featurizer(
     )
 
     actual_states = tracker_featurizer.prediction_states(
-        [embedded_rule_tracker], moodbot_domain, ignore_rule_only_turns=True,
+        [embedded_rule_tracker],
+        moodbot_domain,
+        ignore_rule_only_turns=True,
     )
 
-    expected_states = [[{},]]
+    expected_states = [
+        [
+            {},
+        ]
+    ]
 
     assert actual_states is not None
     assert len(actual_states) == len(expected_states)
@@ -2179,7 +2288,10 @@ def test_prediction_states_ignores_action_intent_unlikely_intent_max_history_fea
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                 USER: {INTENT: "greet"},
             },
-            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},},
+            {
+                USER: {INTENT: "greet"},
+                PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},
+            },
             {
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                 USER: {INTENT: "mood_great"},
@@ -2243,7 +2355,10 @@ def test_prediction_states_keeps_action_intent_unlikely_intent_max_history_featu
                 USER: {INTENT: "greet"},
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_UNLIKELY_INTENT_NAME},
             },
-            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},},
+            {
+                USER: {INTENT: "greet"},
+                PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},
+            },
             {
                 PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                 USER: {INTENT: "mood_great"},
@@ -2271,14 +2386,21 @@ def test_prediction_states_keeps_action_intent_unlikely_intent_max_history_featu
 
 @pytest.mark.parametrize(
     "remove_duplicates, max_history",
-    [[True, None], [True, 2], [False, None], [False, 2],],
+    [
+        [True, None],
+        [True, 2],
+        [False, None],
+        [False, 2],
+    ],
 )
 def test_multilabels_with_intent_max_history_tracker_featurizer(
     moodbot_domain: Domain, max_history: Optional[int], remove_duplicates: bool
 ):
     state_featurizer = IntentTokenizerSingleStateFeaturizer()
     tracker_featurizer = IntentMaxHistoryTrackerFeaturizer(
-        state_featurizer, max_history=max_history, remove_duplicates=remove_duplicates,
+        state_featurizer,
+        max_history=max_history,
+        remove_duplicates=remove_duplicates,
     )
 
     event_list1 = [
@@ -2303,7 +2425,9 @@ def test_multilabels_with_intent_max_history_tracker_featurizer(
     )
 
     _1, actual_labels, _2 = tracker_featurizer.featurize_trackers(
-        [tracker1, tracker2], moodbot_domain, precomputations=None,
+        [tracker1, tracker2],
+        moodbot_domain,
+        precomputations=None,
     )
 
     greet_index = 5
@@ -2312,7 +2436,10 @@ def test_multilabels_with_intent_max_history_tracker_featurizer(
 
     if remove_duplicates:
         expected_labels = np.array(
-            [[greet_index, -1], [mood_great_index, mood_unhappy_index],]
+            [
+                [greet_index, -1],
+                [mood_great_index, mood_unhappy_index],
+            ]
        )
     else:
         expected_labels = np.array(
diff --git a/tests/core/policies/test_rule_policy.py b/tests/core/policies/test_rule_policy.py
index 3d42ee27346b..915df78a8a77 100644
--- a/tests/core/policies/test_rule_policy.py
+++ b/tests/core/policies/test_rule_policy.py
@@ -1075,7 +1075,10 @@ async def test_predict_slot_with_initial_slot_matches_rule(policy: RulePolicy):
 
     form_conversation = DialogueStateTracker.from_events(
         "slot rule test",
-        evts=[ActionExecuted(ACTION_LISTEN_NAME), UserUttered(intent={"name": "i1"}),],
+        evts=[
+            ActionExecuted(ACTION_LISTEN_NAME),
+            UserUttered(intent={"name": "i1"}),
+        ],
         domain=domain,
         slots=domain.slots,
     )
@@ -1926,7 +1929,8 @@ async def test_one_stage_fallback_rule(policy: RulePolicy):
         is_rule_tracker=True,
     )
     policy.train(
-        [greet_rule_which_only_applies_at_start, fallback_recover_rule], domain,
+        [greet_rule_which_only_applies_at_start, fallback_recover_rule],
+        domain,
     )
 
     # RulePolicy predicts fallback action
@@ -2545,7 +2549,8 @@ def test_do_not_hide_rule_turn_with_loops_in_stories(policy: RulePolicy):
     form_activation_story.is_rule_tracker = False
 
     policy.train(
-        [form_activation_rule, form_activation_story], domain,
+        [form_activation_rule, form_activation_story],
+        domain,
     )
     assert policy.lookup[RULE_ONLY_LOOPS] == []
@@ -2605,7 +2610,8 @@ def test_hide_rule_turn_with_loops_as_followup_action(policy: RulePolicy):
     form_activation_story.is_rule_tracker = False
 
     policy.train(
-        [form_activation_rule, GREET_RULE, form_activation_story], domain,
+        [form_activation_rule, GREET_RULE, form_activation_story],
+        domain,
     )
     assert policy.lookup[RULE_ONLY_LOOPS] == []
@@ -3001,7 +3007,9 @@ def test_rule_with_multiple_slots(policy: RulePolicy):
         evts=[
             ActionExecuted(RULE_SNIPPET_ACTION_NAME),
             ActionExecuted(ACTION_LISTEN_NAME),
-            UserUttered(intent={"name": intent_1},),
+            UserUttered(
+                intent={"name": intent_1},
+            ),
             SlotSet(slot_1, value_1),
             SlotSet(slot_2, value_2),
             ActionExecuted(utter_1),
@@ -3014,7 +3022,10 @@ def test_rule_with_multiple_slots(policy: RulePolicy):
     # the order of slots set doesn't matter for prediction
     conversation_events = [
         ActionExecuted(ACTION_LISTEN_NAME),
-        UserUttered("haha", intent={"name": intent_1},),
+        UserUttered(
+            "haha",
+            intent={"name": intent_1},
+        ),
         SlotSet(slot_2, value_2),
         SlotSet(slot_1, value_1),
     ]
@@ -3069,7 +3080,9 @@ def test_include_action_unlikely_intent(policy: RulePolicy):
         evts=[
             ActionExecuted(RULE_SNIPPET_ACTION_NAME),
             ActionExecuted(ACTION_LISTEN_NAME),
-            UserUttered(intent={"name": intent_1},),
+            UserUttered(
+                intent={"name": intent_1},
+            ),
             SlotSet(slot_1, value_1),
             SlotSet(slot_2, value_2),
             ActionExecuted(utter_1),
@@ -3101,7 +3114,10 @@ def test_include_action_unlikely_intent(policy: RulePolicy):
     # ignore action_unlikely_intent.
     conversation_events = [
         ActionExecuted(ACTION_LISTEN_NAME),
-        UserUttered("haha", intent={"name": intent_1},),
+        UserUttered(
+            "haha",
+            intent={"name": intent_1},
+        ),
         SlotSet(slot_2, value_2),
         SlotSet(slot_1, value_1),
         ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
     ]
@@ -3118,7 +3134,10 @@ def test_include_action_unlikely_intent(policy: RulePolicy):
     # anywhere else also triggers utter_2
     conversation_events = [
         ActionExecuted(ACTION_LISTEN_NAME),
-        UserUttered("dummy", intent={"name": intent_2},),
+        UserUttered(
+            "dummy",
+            intent={"name": intent_2},
+        ),
         ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
     ]
     prediction = policy.predict_action_probabilities(
diff --git a/tests/core/policies/test_ted_policy.py b/tests/core/policies/test_ted_policy.py
index 4d0d8cd90a6a..2ee756d5fa37 100644
--- a/tests/core/policies/test_ted_policy.py
+++ b/tests/core/policies/test_ted_policy.py
@@ -104,7 +104,9 @@ def test_diagnostics(
         precomputations = None
         policy.train([GREET_RULE], domain, precomputations)
         prediction = policy.predict_action_probabilities(
-            GREET_RULE, domain, precomputations,
+            GREET_RULE,
+            domain,
+            precomputations,
         )
 
         assert prediction.diagnostic_data
@@ -227,7 +229,8 @@ def test_train_fails_with_checkpoint_zero_eval_num_epochs(self, tmp_path: Path):
             " than 0 are allowed for this parameter."
         )
         with pytest.raises(
-            InvalidConfigException, match=match_string,
+            InvalidConfigException,
+            match=match_string,
         ):
             train_core(
                 domain="data/test_domains/default.yml",
@@ -289,7 +292,9 @@ def test_ranking_length_and_renormalization(
     ):
         precomputations = None
         prediction = trained_policy.predict_action_probabilities(
-            tracker, default_domain, precomputations,
+            tracker,
+            default_domain,
+            precomputations,
         )
 
         # first check the output is what we expect
@@ -331,7 +336,10 @@ def test_label_data_assembly(
         assert assembled_label_data.num_examples == default_domain.num_actions
         assert list(
             assembled_label_data_signature[f"{LABEL}_{ACTION_NAME}"].keys()
-        ) == [MASK, SENTENCE,]
+        ) == [
+            MASK,
+            SENTENCE,
+        ]
         assert list(assembled_label_data_signature[LABEL].keys()) == [IDS]
         assert (
             assembled_label_data_signature[f"{LABEL}_{ACTION_NAME}"][SENTENCE][0].units
@@ -346,7 +354,9 @@ def test_gen_batch(
         )
         precomputations = None
         training_data, label_ids, entity_tags = trained_policy._featurize_for_training(
-            training_trackers, default_domain, precomputations,
+            training_trackers,
+            default_domain,
+            precomputations,
         )
 
         _, all_labels = trained_policy._create_label_data(
@@ -499,14 +509,22 @@ def test_gen_batch(
             [
                 ActionExecuted(ACTION_LISTEN_NAME),
                 UserUttered(text="hello", intent={"name": "greet"}),
-                EntitiesAdded(entities=[{"entity": "name", "value": "Peter"},]),
+                EntitiesAdded(
+                    entities=[
+                        {"entity": "name", "value": "Peter"},
+                    ]
+                ),
                 ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
                 ActionExecuted("utter_greet"),
             ],
             [
                 ActionExecuted(ACTION_LISTEN_NAME),
                 UserUttered(text="hello", intent={"name": "greet"}),
-                EntitiesAdded(entities=[{"entity": "name", "value": "Peter"},]),
+                EntitiesAdded(
+                    entities=[
+                        {"entity": "name", "value": "Peter"},
+                    ]
+                ),
                 ActionExecuted("utter_greet"),
             ],
         ),
@@ -548,10 +566,14 @@ def test_ignore_action_unlikely_intent(
             "test 2", evts=tracker_events_without_action
         )
prediction_with_action = trained_policy.predict_action_probabilities( - tracker_with_action, default_domain, precomputations, + tracker_with_action, + default_domain, + precomputations, ) prediction_without_action = trained_policy.predict_action_probabilities( - tracker_without_action, default_domain, precomputations, + tracker_without_action, + default_domain, + precomputations, ) # If the weights didn't change then both trackers @@ -625,7 +647,9 @@ def test_ranking_length_and_renormalization( default_domain: Domain, ): policy_prediction = trained_policy.predict_action_probabilities( - tracker, default_domain, precomputations=None, + tracker, + default_domain, + precomputations=None, ) assert sum(policy_prediction.probabilities) != pytest.approx(1) @@ -634,7 +658,9 @@ def test_prediction_on_empty_tracker( ): tracker = DialogueStateTracker(DEFAULT_SENDER_ID, default_domain.slots) prediction = trained_policy.predict_action_probabilities( - tracker, default_domain, precomputations=None, + tracker, + default_domain, + precomputations=None, ) assert not prediction.is_end_to_end_prediction assert len(prediction.probabilities) == default_domain.num_actions @@ -678,7 +704,9 @@ def test_ranking_length_and_renormalization( ): precomputations = None predicted_probabilities = trained_policy.predict_action_probabilities( - tracker, default_domain, precomputations, + tracker, + default_domain, + precomputations, ).probabilities assert all([confidence >= 0 for confidence in predicted_probabilities]) assert sum([confidence > 0 for confidence in predicted_probabilities]) == 4 diff --git a/tests/core/policies/test_unexpected_intent_policy.py b/tests/core/policies/test_unexpected_intent_policy.py index 5e220de03aa5..4c6306944e20 100644 --- a/tests/core/policies/test_unexpected_intent_policy.py +++ b/tests/core/policies/test_unexpected_intent_policy.py @@ -88,7 +88,9 @@ def test_ranking_length_and_renormalization( ): precomputations = None prediction_metadata = trained_policy.predict_action_probabilities( - tracker, default_domain, precomputations, + tracker, + default_domain, + precomputations, ).action_metadata assert ( prediction_metadata is None @@ -171,7 +173,9 @@ def test_prepared_data_for_threshold_prediction( default_domain, stories_path, augmentation_factor=0 ) training_model_data, _ = trained_policy._prepare_for_training( - training_trackers, default_domain, precomputations=None, + training_trackers, + default_domain, + precomputations=None, ) data_for_prediction = trained_policy._prepare_data_for_prediction( @@ -184,7 +188,11 @@ def test_similarities_collection_for_label_ids(self): label_ids = np.array([[0, 1], [1, -1], [2, -1]]) outputs = { "similarities": np.array( - [[[1.2, 0.3, 0.2]], [[0.5, 0.2, 1.6]], [[0.01, 0.1, 1.7]],] + [ + [[1.2, 0.3, 0.2]], + [[0.5, 0.2, 1.6]], + [[0.01, 0.1, 1.7]], + ] ) } label_id_similarities = UnexpecTEDIntentPolicy._collect_label_id_grouped_scores( @@ -313,7 +321,9 @@ def test_post_training_threshold_computation( default_domain, stories_path, augmentation_factor=0 ) training_model_data, label_ids = trained_policy._prepare_for_training( - training_trackers, default_domain, precomputations=None, + training_trackers, + default_domain, + precomputations=None, ) trained_policy.compute_label_quantiles_post_training( @@ -543,7 +553,8 @@ def test_action_unlikely_intent_prediction( tracker = DialogueStateTracker(sender_id="init", slots=default_domain.slots) tracker.update_with_events( - [UserUttered(text="hello", intent={"name": query_intent})], default_domain, + 
[UserUttered(text="hello", intent={"name": query_intent})], + default_domain, ) # Preset the model predictions to the similarity values @@ -699,7 +710,9 @@ def test_skip_predictions_if_new_intent( with caplog.at_level(logging.DEBUG): prediction = loaded_policy.predict_action_probabilities( - tracker, default_domain, precomputations=None, + tracker, + default_domain, + precomputations=None, ) assert "Skipping predictions for UnexpecTEDIntentPolicy" in caplog.text @@ -730,7 +743,11 @@ def test_skip_predictions_if_new_intent( [ ActionExecuted(ACTION_LISTEN_NAME), UserUttered(text="hello", intent={"name": "greet"}), - EntitiesAdded(entities=[{"entity": "name", "value": "Peter"},]), + EntitiesAdded( + entities=[ + {"entity": "name", "value": "Peter"}, + ] + ), ActionExecuted(ACTION_UNLIKELY_INTENT_NAME), ActionExecuted("utter_greet"), UserUttered(text="sad", intent={"name": "thank_you"}), @@ -738,7 +755,11 @@ def test_skip_predictions_if_new_intent( [ ActionExecuted(ACTION_LISTEN_NAME), UserUttered(text="hello", intent={"name": "greet"}), - EntitiesAdded(entities=[{"entity": "name", "value": "Peter"},]), + EntitiesAdded( + entities=[ + {"entity": "name", "value": "Peter"}, + ] + ), ActionExecuted("utter_greet"), UserUttered(text="sad", intent={"name": "thank_you"}), ], diff --git a/tests/core/test_actions.py b/tests/core/test_actions.py index 2b695de04411..84ed03d476ee 100644 --- a/tests/core/test_actions.py +++ b/tests/core/test_actions.py @@ -372,7 +372,10 @@ async def test_remote_action_utterances_with_none_values( UserUttered( text="hello", parse_data={ - "intent": {"name": "greet", "confidence": 0.9604260921478271,}, + "intent": { + "name": "greet", + "confidence": 0.9604260921478271, + }, "entities": [ {"entity": "city", "value": "London"}, {"entity": "count", "value": 1}, @@ -381,14 +384,38 @@ async def test_remote_action_utterances_with_none_values( "message_id": "3f4c04602a4947098c574b107d3ccc50", "metadata": {}, "intent_ranking": [ - {"name": "greet", "confidence": 0.9604260921478271,}, - {"name": "goodbye", "confidence": 0.01835782080888748,}, - {"name": "deny", "confidence": 0.011255578137934208,}, - {"name": "bot_challenge", "confidence": 0.004019865766167641,}, - {"name": "affirm", "confidence": 0.002524246694520116,}, - {"name": "mood_great", "confidence": 0.002214624546468258,}, - {"name": "chitchat", "confidence": 0.0009614597074687481,}, - {"name": "mood_unhappy", "confidence": 0.00024030178610701114,}, + { + "name": "greet", + "confidence": 0.9604260921478271, + }, + { + "name": "goodbye", + "confidence": 0.01835782080888748, + }, + { + "name": "deny", + "confidence": 0.011255578137934208, + }, + { + "name": "bot_challenge", + "confidence": 0.004019865766167641, + }, + { + "name": "affirm", + "confidence": 0.002524246694520116, + }, + { + "name": "mood_great", + "confidence": 0.002214624546468258, + }, + { + "name": "chitchat", + "confidence": 0.0009614597074687481, + }, + { + "name": "mood_unhappy", + "confidence": 0.00024030178610701114, + }, ], "response_selector": { "all_retrieval_intents": [], @@ -1139,14 +1166,20 @@ async def test_run_end_to_end_utterance_action(): ["Bob", "Mary"], UserUttered( intent={"name": "inform"}, - entities=[{"entity": "name", "value": "John"},], + entities=[ + {"entity": "name", "value": "John"}, + ], ), ["John"], ), ], ) async def test_action_extract_slots_predefined_mappings( - user: Event, slot_name: Text, slot_value: Any, new_user: Event, updated_value: Any, + user: Event, + slot_name: Text, + slot_value: Any, + new_user: Event, + 
updated_value: Any, ): domain = Domain.from_yaml( textwrap.dedent( @@ -1342,7 +1375,12 @@ async def test_action_extract_slots_when_mapping_applies( ["mushrooms", "kebab"], ), # Only one entity was extracted for `ListSlot` - ([{"entity": "topping", "value": "kebab"},], ["kebab"],), + ( + [ + {"entity": "topping", "value": "kebab"}, + ], + ["kebab"], + ), ], ) async def test_action_extract_slots_with_list_slot( @@ -1381,7 +1419,9 @@ async def test_action_extract_slots_with_list_slot( ActiveLoop(form_name), SlotSet(REQUESTED_SLOT, slot_name), UserUttered( - "bla", intent={"name": "greet", "confidence": 1.0}, entities=entities, + "bla", + intent={"name": "greet", "confidence": 1.0}, + entities=entities, ), ActionExecuted(ACTION_LISTEN_NAME), ], @@ -1484,7 +1524,9 @@ async def test_action_extract_slots_with_matched_mapping_condition(): ActiveLoop(form_name), SlotSet(REQUESTED_SLOT, "name"), UserUttered( - "Emily", intent={"name": "inform", "confidence": 1.0}, entities=[], + "Emily", + intent={"name": "inform", "confidence": 1.0}, + entities=[], ), ], ) @@ -1699,7 +1741,12 @@ async def test_action_extract_slots_from_entity( domain = Domain.from_dict( { "entities": ["some_entity"], - "slots": {"some_slot": {"type": "any", "mappings": [mapping],}}, + "slots": { + "some_slot": { + "type": "any", + "mappings": [mapping], + } + }, "forms": {form_name: {REQUIRED_SLOTS_KEY: ["some_slot"]}}, } ) @@ -1739,7 +1786,12 @@ async def test_action_extract_slots_from_entity( ["mushrooms", "kebab"], ), # Only one entity was extracted for `ListSlot` - ([{"entity": "topping", "value": "kebab"},], ["kebab"],), + ( + [ + {"entity": "topping", "value": "kebab"}, + ], + ["kebab"], + ), ], ) async def test_extract_other_list_slot_from_entity( @@ -1840,7 +1892,10 @@ async def test_trigger_slot_mapping_applies( }, "forms": { form_name: { - REQUIRED_SLOTS_KEY: [entity_name, slot_filled_by_trigger_mapping,] + REQUIRED_SLOTS_KEY: [ + entity_name, + slot_filled_by_trigger_mapping, + ] } }, } @@ -1903,7 +1958,10 @@ async def test_trigger_slot_mapping_does_not_apply(trigger_slot_mapping: Dict): }, "forms": { form_name: { - REQUIRED_SLOTS_KEY: [entity_name, slot_filled_by_trigger_mapping,] + REQUIRED_SLOTS_KEY: [ + entity_name, + slot_filled_by_trigger_mapping, + ] } }, } @@ -1960,7 +2018,11 @@ async def test_trigger_slot_mapping_does_not_apply(trigger_slot_mapping: Dict): [{"event": "slot", "name": "custom_slot", "value": True}], [SlotSet("custom_slot", True)], ), - (UserUttered("bla"), [], [],), + ( + UserUttered("bla"), + [], + [], + ), ], ) async def test_action_extract_slots_execute_validation_action( @@ -2063,7 +2125,8 @@ async def test_action_extract_slots_custom_action_and_predefined_slot_validation ) domain = Domain.from_yaml(domain_yaml) event = UserUttered( - intent={"name": "inform"}, entities=[{"entity": "city", "value": "london"}], + intent={"name": "inform"}, + entities=[{"entity": "city", "value": "london"}], ) tracker = DialogueStateTracker.from_events(sender_id="test_id", evts=[event]) @@ -2233,7 +2296,8 @@ async def test_action_extract_slots_disallowed_events(caplog: LogCaptureFixture) ], ) async def test_action_extract_slots_warns_custom_action_exceptions( - caplog: LogCaptureFixture, exception: Exception, + caplog: LogCaptureFixture, + exception: Exception, ): domain_yaml = textwrap.dedent( """ @@ -2259,7 +2323,8 @@ async def test_action_extract_slots_warns_custom_action_exceptions( with aioresponses() as mocked: mocked.post( - action_server_url, exception=exception, + action_server_url, + 
exception=exception, ) action_server = EndpointConfig(action_server_url) diff --git a/tests/core/test_agent.py b/tests/core/test_agent.py index 79a7a7d8d2a9..c577f66f0081 100644 --- a/tests/core/test_agent.py +++ b/tests/core/test_agent.py @@ -237,7 +237,10 @@ async def test_agent_handle_message_full_model(default_agent: Agent): ActionExecuted(action_name="action_session_start"), SessionStarted(), ActionExecuted(action_name="action_listen"), - UserUttered(text="hello", intent={"name": "greet"},), + UserUttered( + text="hello", + intent={"name": "greet"}, + ), DefinePrevUserUtteredFeaturization(False), ActionExecuted(action_name="utter_greet"), BotUttered( @@ -273,7 +276,10 @@ async def test_agent_handle_message_only_nlu(trained_nlu_model: Text): ActionExecuted(action_name="action_session_start"), SessionStarted(), ActionExecuted(action_name="action_listen"), - UserUttered(text="hello", intent={"name": "greet"},), + UserUttered( + text="hello", + intent={"name": "greet"}, + ), ], model_id, ) @@ -294,7 +300,10 @@ async def test_agent_handle_message_only_core(trained_core_model: Text): ActionExecuted(action_name="action_session_start"), SessionStarted(), ActionExecuted(action_name="action_listen"), - UserUttered(text="/greet", intent={"name": "greet"},), + UserUttered( + text="/greet", + intent={"name": "greet"}, + ), DefinePrevUserUtteredFeaturization(False), ActionExecuted(action_name="utter_greet"), BotUttered( diff --git a/tests/core/test_ensemble.py b/tests/core/test_ensemble.py index 3502d085fadd..affd8344be2a 100644 --- a/tests/core/test_ensemble.py +++ b/tests/core/test_ensemble.py @@ -31,7 +31,10 @@ def test_default_predict_complains_if_no_predictions_given( default_ensemble: DefaultPolicyPredictionEnsemble, ): domain = Domain.load("data/test_domains/default.yml") - tracker = DialogueStateTracker.from_events(sender_id="arbitrary", evts=[],) + tracker = DialogueStateTracker.from_events( + sender_id="arbitrary", + evts=[], + ) with pytest.raises(InvalidConfigException): default_ensemble.combine_predictions_from_kwargs(domain=domain, tracker=tracker) @@ -40,7 +43,10 @@ def test_default_predict_ignores_other_kwargs( default_ensemble: DefaultPolicyPredictionEnsemble, ): domain = Domain.load("data/test_domains/default.yml") - tracker = DialogueStateTracker.from_events(sender_id="arbitrary", evts=[],) + tracker = DialogueStateTracker.from_events( + sender_id="arbitrary", + evts=[], + ) prediction = PolicyPrediction( policy_name="arbitrary", probabilities=[1.0], policy_priority=1 ) diff --git a/tests/core/test_evaluation.py b/tests/core/test_evaluation.py index 9e138c47272b..fd5b19f1acca 100644 --- a/tests/core/test_evaluation.py +++ b/tests/core/test_evaluation.py @@ -311,7 +311,9 @@ def test_event_has_proper_implementation( ) async def test_retrieval_intent(response_selector_agent: Agent, test_file: Text): generator = _create_data_generator( - test_file, response_selector_agent, use_conversation_test_files=True, + test_file, + response_selector_agent, + use_conversation_test_files=True, ) test_stories = generator.generate_story_trackers() @@ -476,7 +478,8 @@ async def test_story_report( async def test_story_report_with_empty_stories( - tmpdir: Path, core_agent: Agent, + tmpdir: Path, + core_agent: Agent, ) -> None: stories_path = tmpdir / "stories.yml" stories_path.write_text("", "utf8") @@ -494,12 +497,30 @@ async def test_story_report_with_empty_stories( @pytest.mark.parametrize( "skip_field,skip_value", [ - [None, None,], - ["precision", None,], - ["f1", None,], - 
["in_training_data_fraction", None,], - ["report", None,], - ["include_report", False,], + [ + None, + None, + ], + [ + "precision", + None, + ], + [ + "f1", + None, + ], + [ + "in_training_data_fraction", + None, + ], + [ + "report", + None, + ], + [ + "include_report", + False, + ], ], ) async def test_log_evaluation_table(caplog, skip_field, skip_value): @@ -618,7 +639,8 @@ async def test_wrong_predictions_with_intent_and_entities( async def test_failed_entity_extraction_comment( - tmpdir: Path, restaurantbot_agent: Agent, + tmpdir: Path, + restaurantbot_agent: Agent, ): test_file = "data/test_yaml_stories/test_failed_entity_extraction_comment.yml" stories_path = str(tmpdir / FAILED_STORIES_FILE) diff --git a/tests/core/test_migrate.py b/tests/core/test_migrate.py index b80cff9b6c95..d4e14de422f8 100644 --- a/tests/core/test_migrate.py +++ b/tests/core/test_migrate.py @@ -642,7 +642,9 @@ def test_migrate_domain_dir_with_out_path_as_file(tmp_path: Path): assert domain -def test_migrate_domain_multiple_files_with_duplicate_slots(tmp_path: Path,): +def test_migrate_domain_multiple_files_with_duplicate_slots( + tmp_path: Path, +): domain_dir = tmp_path / "domain" domain_dir.mkdir() @@ -833,7 +835,9 @@ def test_migrate_domain_raises_for_non_domain_files(tmp_path: Path): rasa.core.migrate.migrate_domain_format(domain_dir, domain_dir) -def test_migration_cleanup(tmp_path: Path,): +def test_migration_cleanup( + tmp_path: Path, +): domain_dir = tmp_path / "domain" domain_dir.mkdir() migrated_domain_dir = tmp_path / "domain2" diff --git a/tests/core/test_policies.py b/tests/core/test_policies.py index 6045f0147c84..94602e704412 100644 --- a/tests/core/test_policies.py +++ b/tests/core/test_policies.py @@ -80,7 +80,9 @@ def _policy_class_to_test() -> Type[Policy]: max_history = 3 # this is the amount of history we test on @pytest.fixture(scope="class") - def resource(self,) -> Resource: + def resource( + self, + ) -> Resource: return Resource(uuid.uuid4().hex) @pytest.fixture(scope="class") @@ -210,7 +212,8 @@ def test_prediction_on_empty_tracker( ): tracker = DialogueStateTracker(DEFAULT_SENDER_ID, default_domain.slots) prediction = trained_policy.predict_action_probabilities( - tracker, default_domain, + tracker, + default_domain, ) assert not prediction.is_end_to_end_prediction assert len(prediction.probabilities) == default_domain.num_actions @@ -228,12 +231,18 @@ def test_persist_and_load_empty_policy( ): resource = Resource(uuid.uuid4().hex) empty_policy = self.create_policy( - None, default_model_storage, resource, execution_context, + None, + default_model_storage, + resource, + execution_context, ) empty_policy.train([], default_domain) loaded = empty_policy.__class__.load( - self._config(), default_model_storage, resource, execution_context, + self._config(), + default_model_storage, + resource, + execution_context, ) assert loaded is not None @@ -241,7 +250,10 @@ def test_persist_and_load_empty_policy( @staticmethod def _get_next_action(policy: Policy, events: List[Event], domain: Domain) -> Text: tracker = get_tracker(events) - scores = policy.predict_action_probabilities(tracker, domain,).probabilities + scores = policy.predict_action_probabilities( + tracker, + domain, + ).probabilities index = scores.index(max(scores)) return domain.action_names_or_texts[index] @@ -459,7 +471,8 @@ def test_finetune_after_load( ) loaded_policy.train( - original_train_data + [new_story], default_domain, + original_train_data + [new_story], + default_domain, ) # Get the hash of the tracker state of new 
story @@ -517,10 +530,12 @@ def test_ignore_action_unlikely_intent( "test 2", evts=tracker_events_without_action, slots=default_domain.slots ) prediction_with_action = trained_policy.predict_action_probabilities( - tracker_with_action, default_domain, + tracker_with_action, + default_domain, ) prediction_without_action = trained_policy.predict_action_probabilities( - tracker_without_action, default_domain, + tracker_without_action, + default_domain, ) # Memoization shouldn't be affected with the @@ -628,10 +643,16 @@ def test_prediction( ActionExecuted(UTTER_BYE_ACTION), ] training_story = TrackerWithCachedStates.from_events( - "training story", evts=events, domain=domain, slots=domain.slots, + "training story", + evts=events, + domain=domain, + slots=domain.slots, ) test_story = TrackerWithCachedStates.from_events( - "training story", events[:-1], domain=domain, slots=domain.slots, + "training story", + events[:-1], + domain=domain, + slots=domain.slots, ) policy.train([training_story], domain) prediction = policy.predict_action_probabilities(test_story, domain) diff --git a/tests/core/test_test.py b/tests/core/test_test.py index 134df290d886..4a4064af698c 100644 --- a/tests/core/test_test.py +++ b/tests/core/test_test.py @@ -40,7 +40,10 @@ def _probabilities_with_action_unlikely_intent_for( _original = DefaultPolicyPredictionEnsemble.combine_predictions_from_kwargs def combine_predictions_from_kwargs( - self, tracker: DialogueStateTracker, domain: Domain, **kwargs: Any, + self, + tracker: DialogueStateTracker, + domain: Domain, + **kwargs: Any, ) -> PolicyPrediction: latest_event = tracker.events[-1] if ( @@ -73,9 +76,7 @@ def combine_predictions_from_kwargs( def _custom_prediction_states_for_rules( ignore_action_unlikely_intent: bool = False, -) -> Callable[ - [RulePolicy, DialogueStateTracker, Domain, bool], List[State], -]: +) -> Callable[[RulePolicy, DialogueStateTracker, Domain, bool], List[State],]: """Creates prediction states for `RulePolicy`. `RulePolicy` does not ignore `action_unlikely_intent` in reality. 
@@ -522,7 +523,9 @@ async def test_action_unlikely_intent_warning_and_story_error( agent = await _train_rule_based_agent(train_file_name, True) result = await rasa.core.test.test( - str(test_file_name), agent, out_directory=str(tmp_path), + str(test_file_name), + agent, + out_directory=str(tmp_path), ) assert "report" in result.keys() assert result["report"]["conversation_accuracy"]["correct"] == 0 diff --git a/tests/core/test_tracker_stores.py b/tests/core/test_tracker_stores.py index 2905a81f813b..9bab3c699a5d 100644 --- a/tests/core/test_tracker_stores.py +++ b/tests/core/test_tracker_stores.py @@ -227,7 +227,8 @@ def test_raise_connection_exception_redis_tracker_store_creation( def test_mongo_tracker_store_raise_exception( - domain: Domain, monkeypatch: MonkeyPatch, + domain: Domain, + monkeypatch: MonkeyPatch, ): monkeypatch.setattr( rasa.core.tracker_store, @@ -593,7 +594,9 @@ def test_sql_additional_events_with_session_start(domain: Domain): [(MockedMongoTrackerStore, {}), (SQLTrackerStore, {"host": "sqlite:///"})], ) def test_tracker_store_retrieve_with_session_started_events( - tracker_store_type: Type[TrackerStore], tracker_store_kwargs: Dict, domain: Domain, + tracker_store_type: Type[TrackerStore], + tracker_store_kwargs: Dict, + domain: Domain, ): tracker_store = tracker_store_type(domain, **tracker_store_kwargs) events = [ @@ -622,7 +625,9 @@ def test_tracker_store_retrieve_with_session_started_events( [(MockedMongoTrackerStore, {}), (SQLTrackerStore, {"host": "sqlite:///"})], ) def test_tracker_store_retrieve_without_session_started_events( - tracker_store_type: Type[TrackerStore], tracker_store_kwargs: Dict, domain, + tracker_store_type: Type[TrackerStore], + tracker_store_kwargs: Dict, + domain, ): tracker_store = tracker_store_type(domain, **tracker_store_kwargs) @@ -871,7 +876,9 @@ def test_login_db_with_no_postgresql(tmp_path: Path): "type": "mongod", "url": "mongodb://0.0.0.0:42/?serverSelectionTimeoutMS=5000", }, - {"type": "dynamo",}, + { + "type": "dynamo", + }, ], ) def test_tracker_store_connection_error(config: Dict, domain: Domain): @@ -882,7 +889,9 @@ def test_tracker_store_connection_error(config: Dict, domain: Domain): async def prepare_token_serialisation( - tracker_store: TrackerStore, response_selector_agent: Agent, sender_id: Text, + tracker_store: TrackerStore, + response_selector_agent: Agent, + sender_id: Text, ): text = "Good morning" tokenizer = WhitespaceTokenizer(WhitespaceTokenizer.get_default_config()) diff --git a/tests/core/test_training.py b/tests/core/test_training.py index 6e4ab56b2dd4..43f7f9a50584 100644 --- a/tests/core/test_training.py +++ b/tests/core/test_training.py @@ -36,7 +36,10 @@ async def test_random_seed( tmp_path: Path, monkeypatch: MonkeyPatch, domain_path: Text, stories_path: Text ): policies_config = { - "policies": [{"name": "TEDPolicy", "random_seed": 42}, {"name": "RulePolicy"},] + "policies": [ + {"name": "TEDPolicy", "random_seed": 42}, + {"name": "RulePolicy"}, + ] } config_file = tmp_path / "config.yml" rasa.shared.utils.io.write_yaml(policies_config, config_file) diff --git a/tests/core/training/test_interactive.py b/tests/core/training/test_interactive.py index bb24dcf9fab1..ea1ef0f8359b 100644 --- a/tests/core/training/test_interactive.py +++ b/tests/core/training/test_interactive.py @@ -481,7 +481,8 @@ async def test_undo_latest_msg(mock_endpoint): async def test_write_stories_to_file( - mock_endpoint: EndpointConfig, tmp_path, + mock_endpoint: EndpointConfig, + tmp_path, ): tracker_dump = 
rasa.shared.utils.io.read_file( "data/test_trackers/tracker_moodbot_with_new_utterances.json" @@ -620,7 +621,11 @@ async def test_filter_intents_before_save_nlu_file(domain_path: Text): @pytest.mark.parametrize( "path, expected_format", - [("bla.json", RASA), ("other.yml", RASA_YAML), ("unknown", UNK),], + [ + ("bla.json", RASA), + ("other.yml", RASA_YAML), + ("unknown", UNK), + ], ) def test_get_nlu_target_format(path: Text, expected_format: Text): assert interactive._get_nlu_target_format(path) == expected_format @@ -772,7 +777,12 @@ async def test_correct_question_for_action_name_was_asked( monkeypatch.setattr( interactive, "_request_action_from_user", - AsyncMock(return_value=("action_another_one", False,)), + AsyncMock( + return_value=( + "action_another_one", + False, + ) + ), ) mocked_send_action = AsyncMock() diff --git a/tests/core/training/test_story_conflict.py b/tests/core/training/test_story_conflict.py index f4b4fb7ca743..dfcb872b210c 100644 --- a/tests/core/training/test_story_conflict.py +++ b/tests/core/training/test_story_conflict.py @@ -181,16 +181,25 @@ async def test_has_prior_events(): async def test_get_previous_event(): assert _get_previous_event( {PREVIOUS_ACTION: {"action_name": "utter_greet"}, USER: {"intent": "greet"}} - ) == ("action", "utter_greet",) + ) == ( + "action", + "utter_greet", + ) assert _get_previous_event( {PREVIOUS_ACTION: {"action_text": "this is a test"}, USER: {"intent": "greet"}} - ) == ("bot utterance", "this is a test",) + ) == ( + "bot utterance", + "this is a test", + ) assert _get_previous_event( { PREVIOUS_ACTION: {"action_name": ACTION_LISTEN_NAME}, USER: {"intent": "greet"}, } - ) == ("intent", "greet",) + ) == ( + "intent", + "greet", + ) async def test_has_no_prior_events(): diff --git a/tests/dialogues.py b/tests/dialogues.py index 9bb8326397d2..5edc954172b3 100644 --- a/tests/dialogues.py +++ b/tests/dialogues.py @@ -11,7 +11,10 @@ TEST_DEFAULT_DIALOGUE = Dialogue( name="default", events=[ - ActionExecuted(action_name="action_listen", timestamp=1551952977.4850519,), + ActionExecuted( + action_name="action_listen", + timestamp=1551952977.4850519, + ), UserUttered( entities=[{"end": 19, "entity": "name", "start": 14, "value": "Peter"}], intent={"confidence": 0.0, "name": "greet"}, @@ -29,7 +32,10 @@ timestamp=1551953035.076376, ), SlotSet(key="name", timestamp=1551953035.076385, value="Peter"), - ActionExecuted(action_name="utter_greet", timestamp=1551953040.607782,), + ActionExecuted( + action_name="utter_greet", + timestamp=1551953040.607782, + ), BotUttered( data={"attachment": None, "buttons": None, "elements": None}, text="hey there Peter!", @@ -40,7 +46,10 @@ TEST_FORMBOT_DIALOGUE = Dialogue( name="formbot", events=[ - ActionExecuted(action_name="action_listen", timestamp=1551884035.892855,), + ActionExecuted( + action_name="action_listen", + timestamp=1551884035.892855, + ), UserUttered( intent={"confidence": 0.3748943507671356, "name": "greet"}, parse_data={ @@ -128,7 +137,8 @@ timestamp=1551884208.092693, ), ActionExecuted( - action_name="action_deactivate_loop", timestamp=1551884214.951055, + action_name="action_deactivate_loop", + timestamp=1551884214.951055, ), ActiveLoop(name=None, timestamp=1551884214.9510589), SlotSet(key="requested_slot", timestamp=1551884214.951062, value=None), @@ -143,7 +153,10 @@ TEST_MOODBOT_DIALOGUE = Dialogue( name="moodbot", events=[ - ActionExecuted(action_name="action_listen", timestamp=1551883958.346432,), + ActionExecuted( + action_name="action_listen", + timestamp=1551883958.346432, + ), 
UserUttered( intent={"confidence": 0.44488201660555066, "name": "greet"}, parse_data={ diff --git a/tests/docs/test_docs_use_base_url.py b/tests/docs/test_docs_use_base_url.py index a221200bec91..d03fd1696d2c 100644 --- a/tests/docs/test_docs_use_base_url.py +++ b/tests/docs/test_docs_use_base_url.py @@ -10,7 +10,8 @@ # we're matching anchors with href containing strings, but not starting # with "http". This also exclude local href already configured using `useBaseUrl()` ANCHOR_RE = re.compile( - r"<(a|Button)[^>]*href=\"(?P(?!http).+?)\"[^>]*>", re.DOTALL, + r"<(a|Button)[^>]*href=\"(?P(?!http).+?)\"[^>]*>", + re.DOTALL, ) diff --git a/tests/engine/recipes/test_default_recipe.py b/tests/engine/recipes/test_default_recipe.py index 84c61e5f4b6b..2aefa2e639ad 100644 --- a/tests/engine/recipes/test_default_recipe.py +++ b/tests/engine/recipes/test_default_recipe.py @@ -136,7 +136,9 @@ def test_generate_graphs( config = rasa.shared.utils.io.read_yaml_file(config_path) - recipe = Recipe.recipe_for_name(DefaultV1Recipe.name,) + recipe = Recipe.recipe_for_name( + DefaultV1Recipe.name, + ) model_config = recipe.graph_config_for_recipe( config, {}, training_type=training_type, is_finetuning=is_finetuning ) @@ -173,7 +175,10 @@ def test_language_returning(): ) recipe = Recipe.recipe_for_name(DefaultV1Recipe.name) - model_config = recipe.graph_config_for_recipe(config, {},) + model_config = recipe.graph_config_for_recipe( + config, + {}, + ) assert model_config.language == "xy" @@ -193,7 +198,8 @@ def test_tracker_generator_parameter_interpolation(): recipe = Recipe.recipe_for_name(DefaultV1Recipe.name) model_config = recipe.graph_config_for_recipe( - config, {"augmentation_factor": augmentation, "debug_plots": debug_plots}, + config, + {"augmentation_factor": augmentation, "debug_plots": debug_plots}, ) node = model_config.train_schema.nodes["training_tracker_provider"] @@ -216,7 +222,8 @@ def test_nlu_training_data_persistence(): recipe = Recipe.recipe_for_name(DefaultV1Recipe.name) model_config = recipe.graph_config_for_recipe( - config, {"persist_nlu_training_data": True}, + config, + {"persist_nlu_training_data": True}, ) node = model_config.train_schema.nodes["nlu_training_data_provider"] @@ -237,10 +244,17 @@ def test_num_threads_interpolation(): expected_predict_schema = GraphSchema.from_dict(expected_schema_as_dict) for node_name, node in expected_train_schema.nodes.items(): - if issubclass( - node.uses, - (SklearnIntentClassifier, MitieEntityExtractor, MitieIntentClassifier,), - ) and node_name.startswith("train_"): + if ( + issubclass( + node.uses, + ( + SklearnIntentClassifier, + MitieEntityExtractor, + MitieIntentClassifier, + ), + ) + and node_name.startswith("train_") + ): node.config["num_threads"] = 20 config = rasa.shared.utils.io.read_yaml_file( @@ -387,19 +401,25 @@ def test_retrieve_via_invalid_module_path(): with pytest.raises(ImportError): path = "rasa.core.policies.ted_policy.TEDPolicy1000" DefaultV1Recipe().graph_config_for_recipe( - {"policies": [{"name": path}]}, {}, TrainingType.CORE, + {"policies": [{"name": path}]}, + {}, + TrainingType.CORE, ) def test_train_nlu_without_nlu_pipeline(): with pytest.raises(InvalidConfigException): DefaultV1Recipe().graph_config_for_recipe( - {"pipeline": []}, {}, TrainingType.NLU, + {"pipeline": []}, + {}, + TrainingType.NLU, ) def test_train_core_without_nlu_pipeline(): with pytest.raises(InvalidConfigException): DefaultV1Recipe().graph_config_for_recipe( - {"policies": []}, {}, TrainingType.CORE, + {"policies": []}, + {}, + 
TrainingType.CORE, ) diff --git a/tests/engine/storage/test_local_model_storage.py b/tests/engine/storage/test_local_model_storage.py index 46dafc265eff..1e8995b17222 100644 --- a/tests/engine/storage/test_local_model_storage.py +++ b/tests/engine/storage/test_local_model_storage.py @@ -84,7 +84,8 @@ def test_read_from_rasa2_resource(tmp_path_factory: TempPathFactory): def test_create_model_package( - tmp_path_factory: TempPathFactory, domain: Domain, + tmp_path_factory: TempPathFactory, + domain: Domain, ): train_model_storage = LocalModelStorage( tmp_path_factory.mktemp("train model storage") @@ -146,9 +147,10 @@ def test_create_model_package( just_packaged_metadata = LocalModelStorage.metadata_from_archive(archive_path) - (load_model_storage, packaged_metadata,) = LocalModelStorage.from_model_archive( - load_model_storage_dir, archive_path - ) + ( + load_model_storage, + packaged_metadata, + ) = LocalModelStorage.from_model_archive(load_model_storage_dir, archive_path) assert just_packaged_metadata.trained_at == packaged_metadata.trained_at @@ -166,7 +168,9 @@ def test_create_model_package( def test_read_unsupported_model( - monkeypatch: MonkeyPatch, tmp_path_factory: TempPathFactory, domain: Domain, + monkeypatch: MonkeyPatch, + tmp_path_factory: TempPathFactory, + domain: Domain, ): train_model_storage = LocalModelStorage( tmp_path_factory.mktemp("train model storage") diff --git a/tests/engine/test_caching.py b/tests/engine/test_caching.py index 5c0ad884b6ee..7277b76b84e6 100644 --- a/tests/engine/test_caching.py +++ b/tests/engine/test_caching.py @@ -236,7 +236,9 @@ def test_restore_cached_output_with_invalid_module( ) monkeypatch.setattr( - rasa.shared.utils.common, "class_from_module_path", cached_module, + rasa.shared.utils.common, + "class_from_module_path", + cached_module, ) assert ( diff --git a/tests/engine/test_loader.py b/tests/engine/test_loader.py index 5309251cbb51..9f60e1b9cd55 100644 --- a/tests/engine/test_loader.py +++ b/tests/engine/test_loader.py @@ -41,7 +41,9 @@ def test_loader_loads_graph_runner( uses=PersistableTestComponent, fn="train", constructor_name="create", - config={"test_value": test_value,}, + config={ + "test_value": test_value, + }, is_target=True, ), "load": SchemaNode( @@ -70,7 +72,8 @@ def test_loader_loads_graph_runner( output_filename = tmp_path / "model.tar.gz" importer = TrainingDataImporter.load_from_dict( - training_data_paths=[], domain_path=str(domain_path), + training_data_paths=[], + domain_path=str(domain_path), ) trained_at = datetime.utcnow() diff --git a/tests/engine/test_validation.py b/tests/engine/test_validation.py index c171e6fbe718..b9b2a1cfd503 100644 --- a/tests/engine/test_validation.py +++ b/tests/engine/test_validation.py @@ -53,7 +53,9 @@ def run(self) -> List[Message]: class TestCoreTarget(TestComponentWithoutRun): - def run(self,) -> PolicyPrediction: + def run( + self, + ) -> PolicyPrediction: pass @@ -140,7 +142,9 @@ def test_graph_component_fn_does_not_exist(): with pytest.raises( GraphSchemaValidationException, match="required method 'some_fn'" ): - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_graph_output_is_not_fingerprintable_int(): @@ -151,7 +155,9 @@ def run(self) -> int: graph_config = create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="fingerprintable"): - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_predict_graph_output_is_not_fingerprintable(): @@ -172,7 +178,9 @@ def run(self) -> 
Any: graph_config = create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="fingerprintable"): - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_graph_output_is_not_fingerprintable_None(): @@ -180,10 +188,14 @@ class MyComponent(TestComponentWithoutRun): def run(self) -> None: pass - graph_config = create_test_schema(uses=MyComponent,) + graph_config = create_test_schema( + uses=MyComponent, + ) with pytest.raises(GraphSchemaValidationException, match="fingerprintable"): - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_graph_with_forward_referenced_output_type(): @@ -196,7 +208,9 @@ def run(self) -> "UserUttered": # noqa: F821 graph_config = create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="forward reference"): - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_graph_output_missing_type_annotation(): @@ -209,7 +223,9 @@ def run(self): with pytest.raises( GraphSchemaValidationException, match="does not have a type annotation" ): - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_graph_with_fingerprintable_output(): @@ -233,7 +249,9 @@ def run(self) -> MyTrainingData: graph_config = create_test_schema(uses=MyComponent) - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_graph_constructor_missing(): @@ -246,7 +264,9 @@ def run(self) -> TrainingData: with pytest.raises( GraphSchemaValidationException, match="required method 'invalid'" ): - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_graph_constructor_config_wrong_type(): @@ -264,7 +284,9 @@ def create( graph_config = create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="incompatible type"): - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_graph_constructor_resource_wrong_type(): @@ -282,7 +304,9 @@ def create( graph_config = create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="incompatible type"): - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_graph_constructor_model_storage_wrong_type(): @@ -300,7 +324,9 @@ def create( graph_config = create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="incompatible type"): - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_graph_constructor_execution_context_wrong_type(): @@ -318,7 +344,9 @@ def create( graph_config = create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="incompatible type"): - validation.validate(graph_config,) + validation.validate( + graph_config, + ) @pytest.mark.parametrize( @@ -359,7 +387,8 @@ def supported_languages() -> Optional[List[Text]]: @pytest.mark.parametrize( - "current_language, not_supported_languages", [("de", ["de", "en"]), ("en", ["en"])], + "current_language, not_supported_languages", + [("de", ["de", "en"]), ("en", ["en"])], ) def test_graph_constructor_execution_exclusive_list_not_supported_language( current_language: Text, not_supported_languages: Optional[List[Text]] @@ -411,7 +440,9 @@ def required_packages() -> List[Text]: graph_config = create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="not installed"): - 
validation.validate(graph_config,) + validation.validate( + graph_config, + ) @pytest.mark.parametrize("required_packages", [["tensorflow"], ["tensorflow", "numpy"]]) @@ -424,7 +455,9 @@ def required_packages() -> List[Text]: graph_config = create_test_schema(uses=MyComponent) - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_run_param_not_satisfied(): @@ -435,7 +468,9 @@ def run(self, some_param: TrainingData) -> TrainingData: graph_config = create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="needs the param"): - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_run_param_satifisfied_due_to_default(): @@ -445,7 +480,9 @@ def run(self, some_param: TrainingData = TrainingData()) -> TrainingData: graph_config = create_test_schema(uses=MyComponent) - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_too_many_supplied_params(): @@ -468,7 +505,9 @@ def run(self, **kwargs: Any) -> TrainingData: uses=MyComponent, needs={"some_param": "parent"}, parent=TestComponentWithRun ) - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_run_fn_with_variable_length_positional_param(): @@ -480,7 +519,9 @@ def run(self, *args: Any, some_param: TrainingData) -> TrainingData: uses=MyComponent, needs={"some_param": "parent"}, parent=TestComponentWithRun ) - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_matching_params_due_to_constructor(): @@ -550,7 +591,9 @@ def load( pass graph_config = create_test_schema( - uses=MyComponent, eager=eager, constructor_name="load", + uses=MyComponent, + eager=eager, + constructor_name="load", ) with pytest.raises(GraphSchemaValidationException, match=error_message): @@ -583,7 +626,9 @@ def run(self) -> Domain: with pytest.raises( GraphSchemaValidationException, match="expects an input of type" ): - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_parent_supplying_wrong_type_to_constructor(): @@ -614,7 +659,9 @@ def load( with pytest.raises( GraphSchemaValidationException, match="expects an input of type" ): - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_parent_supplying_subtype(): @@ -627,10 +674,14 @@ def run(self, training_data: TrainingData) -> TrainingData: pass graph_config = create_test_schema( - uses=MyComponent, parent=Parent, needs={"training_data": "parent"}, + uses=MyComponent, + parent=Parent, + needs={"training_data": "parent"}, ) - validation.validate(graph_config,) + validation.validate( + graph_config, + ) def test_child_accepting_any_type_from_parent(): @@ -643,10 +694,14 @@ def run(self, training_data: Any) -> TrainingData: pass graph_config = create_test_schema( - uses=MyComponent, parent=Parent, needs={"training_data": "parent"}, + uses=MyComponent, + parent=Parent, + needs={"training_data": "parent"}, ) - validation.validate(graph_config,) + validation.validate( + graph_config, + ) @pytest.mark.parametrize("is_train_graph", [True, False]) @@ -885,7 +940,8 @@ def test_validation_with_core_target_wrong_type(): ) with pytest.raises( - GraphSchemaValidationException, match="Core model's .* invalid return type", + GraphSchemaValidationException, + match="Core model's .* invalid return type", ): validation.validate( GraphModelConfiguration( @@ -1142,7 +1198,8 @@ def test_validate_validates_required_components( itertools.product(REQUIRED_COMPONENT_TEST_CASES, 
[True, False]), ) def test_validate_required_components( - test_case: List[RequiredComponentsTestCase], test_subclass: bool, + test_case: List[RequiredComponentsTestCase], + test_subclass: bool, ): graph_schema = _create_graph_schema_from_requirements( node_needs_requires=test_case.node_needs_requires_tuples, @@ -1151,7 +1208,9 @@ def test_validate_required_components( ) num_unmet = test_case.num_unmet_requirements if num_unmet == 0: - validation._validate_required_components(schema=graph_schema,) + validation._validate_required_components( + schema=graph_schema, + ) else: message = f"{num_unmet} components are missing" with pytest.raises(GraphSchemaValidationException, match=message): @@ -1170,7 +1229,8 @@ def test_validate_required_components( ), ) def test_recursively_validate_required_components( - test_case: List[RequiredComponentsTestCase], test_subclass: bool, + test_case: List[RequiredComponentsTestCase], + test_subclass: bool, ): graph_schema = _create_graph_schema_from_requirements( node_needs_requires=test_case.node_needs_requires_tuples, diff --git a/tests/engine/training/test_fingerprinting.py b/tests/engine/training/test_fingerprinting.py index d535e635e6b6..cb86488abd34 100644 --- a/tests/engine/training/test_fingerprinting.py +++ b/tests/engine/training/test_fingerprinting.py @@ -11,10 +11,14 @@ def test_fingerprint_stays_same(): key1 = fingerprinting.calculate_fingerprint_key( - TEDPolicy, TEDPolicy.get_default_config(), {"input": FingerprintableText("Hi")}, + TEDPolicy, + TEDPolicy.get_default_config(), + {"input": FingerprintableText("Hi")}, ) key2 = fingerprinting.calculate_fingerprint_key( - TEDPolicy, TEDPolicy.get_default_config(), {"input": FingerprintableText("Hi")}, + TEDPolicy, + TEDPolicy.get_default_config(), + {"input": FingerprintableText("Hi")}, ) assert key1 == key2 @@ -37,7 +41,9 @@ def test_fingerprint_changes_due_to_class(): def test_fingerprint_changes_due_to_config(): key1 = fingerprinting.calculate_fingerprint_key( - TEDPolicy, {}, {"input": FingerprintableText("Hi")}, + TEDPolicy, + {}, + {"input": FingerprintableText("Hi")}, ) key2 = fingerprinting.calculate_fingerprint_key( ResponseSelector, @@ -50,7 +56,9 @@ def test_fingerprint_changes_due_to_config(): def test_fingerprint_changes_due_to_inputs(): key1 = fingerprinting.calculate_fingerprint_key( - TEDPolicy, {}, {"input": FingerprintableText("Hi")}, + TEDPolicy, + {}, + {"input": FingerprintableText("Hi")}, ) key2 = fingerprinting.calculate_fingerprint_key( ResponseSelector, @@ -63,14 +71,18 @@ def test_fingerprint_changes_due_to_inputs(): def test_fingerprint_changes_due_to_changed_source(monkeypatch: MonkeyPatch): key1 = fingerprinting.calculate_fingerprint_key( - TEDPolicy, {}, {"input": FingerprintableText("Hi")}, + TEDPolicy, + {}, + {"input": FingerprintableText("Hi")}, ) get_source_mock = Mock(return_value="other implementation") monkeypatch.setattr(inspect, inspect.getsource.__name__, get_source_mock) key2 = fingerprinting.calculate_fingerprint_key( - TEDPolicy, {}, {"input": FingerprintableText("Hi")}, + TEDPolicy, + {}, + {"input": FingerprintableText("Hi")}, ) assert key1 != key2 diff --git a/tests/engine/training/test_graph_trainer.py b/tests/engine/training/test_graph_trainer.py index 9b5b7b6da972..81a1d2649926 100644 --- a/tests/engine/training/test_graph_trainer.py +++ b/tests/engine/training/test_graph_trainer.py @@ -59,7 +59,9 @@ def test_graph_trainer_returns_model_metadata( uses=PersistableTestComponent, fn="train", constructor_name="create", - config={"test_value": 
test_value,}, + config={ + "test_value": test_value, + }, is_target=True, ), "load": SchemaNode( @@ -495,7 +497,8 @@ def test_graph_trainer_train_logging_with_cached_components( def test_resources_fingerprints_are_unique_when_cached( - temp_cache: LocalTrainingCache, train_with_schema: Callable, + temp_cache: LocalTrainingCache, + train_with_schema: Callable, ): train_schema = GraphSchema( { @@ -542,7 +545,8 @@ def test_resources_fingerprints_are_unique_when_cached( def test_resources_fingerprints_remain_after_being_cached( - temp_cache: LocalTrainingCache, train_with_schema: Callable, + temp_cache: LocalTrainingCache, + train_with_schema: Callable, ): train_schema = GraphSchema( { @@ -613,7 +617,11 @@ def test_exception_handling_for_on_before_hook( default_execution_context: ExecutionContext, ): schema_node = SchemaNode( - needs={}, uses=ProvideX, fn="provide", constructor_name="create", config={}, + needs={}, + uses=ProvideX, + fn="provide", + constructor_name="create", + config={}, ) class MyHook(GraphNodeHook): diff --git a/tests/engine/training/test_hooks.py b/tests/engine/training/test_hooks.py index d248048463a1..8d2676607713 100644 --- a/tests/engine/training/test_hooks.py +++ b/tests/engine/training/test_hooks.py @@ -11,7 +11,8 @@ def test_training_hook_saves_to_cache( - default_model_storage: ModelStorage, temp_cache: TrainingCache, + default_model_storage: ModelStorage, + temp_cache: TrainingCache, ): # We need an execution context so the hook can determine the class of the graph # component @@ -71,7 +72,8 @@ def test_training_hook_saves_to_cache( def test_training_hook_does_not_cache_cached_component( - default_model_storage: ModelStorage, temp_cache: TrainingCache, + default_model_storage: ModelStorage, + temp_cache: TrainingCache, ): # We need an execution context so the hook can determine the class of the graph # component diff --git a/tests/graph_components/converters/test_nlu_message_converter.py b/tests/graph_components/converters/test_nlu_message_converter.py index f3a0e891dfa3..9264ab7458b3 100644 --- a/tests/graph_components/converters/test_nlu_message_converter.py +++ b/tests/graph_components/converters/test_nlu_message_converter.py @@ -8,7 +8,8 @@ def test_nlu_message_converter_converts_message( - default_model_storage: ModelStorage, default_execution_context: ExecutionContext, + default_model_storage: ModelStorage, + default_execution_context: ExecutionContext, ): component = NLUMessageConverter.create( {**NLUMessageConverter.get_default_config()}, @@ -28,10 +29,14 @@ def test_nlu_message_converter_converts_message( def test_nlu_message_converter_converts_message_with_metadata( - default_model_storage: ModelStorage, default_execution_context: ExecutionContext, + default_model_storage: ModelStorage, + default_execution_context: ExecutionContext, ): component = NLUMessageConverter.create( - {}, default_model_storage, Resource("with_metadata"), default_execution_context, + {}, + default_model_storage, + Resource("with_metadata"), + default_execution_context, ) message = UserMessage(text="Hello", metadata={"test_key": "test_value"}) @@ -44,7 +49,8 @@ def test_nlu_message_converter_converts_message_with_metadata( def test_nlu_message_converter_handles_no_user_message( - default_model_storage: ModelStorage, default_execution_context: ExecutionContext, + default_model_storage: ModelStorage, + default_execution_context: ExecutionContext, ): component = NLUMessageConverter.create( {}, diff --git a/tests/graph_components/providers/test_story_graph_provider.py 
b/tests/graph_components/providers/test_story_graph_provider.py index 7701535619eb..9b7b9552b2fb 100644 --- a/tests/graph_components/providers/test_story_graph_provider.py +++ b/tests/graph_components/providers/test_story_graph_provider.py @@ -10,7 +10,12 @@ @pytest.mark.parametrize( - "config", [{}, {"exclusion_percentage": None}, {"exclusion_percentage": 25},], + "config", + [ + {}, + {"exclusion_percentage": None}, + {"exclusion_percentage": 25}, + ], ) def test_story_graph_provider_provide( default_model_storage: ModelStorage, diff --git a/tests/graph_components/validators/test_finetuning_validator.py b/tests/graph_components/validators/test_finetuning_validator.py index fef3da5bcaee..f666e59acf89 100644 --- a/tests/graph_components/validators/test_finetuning_validator.py +++ b/tests/graph_components/validators/test_finetuning_validator.py @@ -209,7 +209,9 @@ def _get_example_schema(num_epochs: int = 5, other_parameter: int = 10) -> Graph @pytest.mark.parametrize("nlu, core", [(True, False), (False, True), (True, True)]) def test_validate_after_changing_epochs_in_config( - get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool, + get_validation_method: Callable[..., ValidationMethodType], + nlu: bool, + core: bool, ): # training schema1 = _get_example_schema(num_epochs=5) @@ -232,7 +234,9 @@ def test_validate_after_changing_epochs_in_config( @pytest.mark.parametrize("nlu, core", [(True, False), (False, True), (True, True)]) def test_validate_after_changing_constructor( - get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool, + get_validation_method: Callable[..., ValidationMethodType], + nlu: bool, + core: bool, ): # training schema1 = _get_example_schema(num_epochs=5) @@ -253,7 +257,9 @@ def test_validate_after_changing_constructor( @pytest.mark.parametrize("nlu, core", [(True, False), (False, True), (True, True)]) def test_validate_after_removing_node_from_schema( - get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool, + get_validation_method: Callable[..., ValidationMethodType], + nlu: bool, + core: bool, ): # training schema1 = _get_example_schema(num_epochs=5) @@ -268,7 +274,11 @@ def test_validate_after_removing_node_from_schema( # finetuning raises - doesn't matter if it's nlu/core/both loaded_validate = get_validation_method( - finetuning=True, load=True, nlu=nlu, core=core, graph_schema=schema2, + finetuning=True, + load=True, + nlu=nlu, + core=core, + graph_schema=schema2, ) with pytest.raises(InvalidConfigException): loaded_validate(importer=EmptyDataImporter()) @@ -276,7 +286,9 @@ def test_validate_after_removing_node_from_schema( @pytest.mark.parametrize("nlu, core", [(True, False), (False, True), (True, True)]) def test_validate_after_adding_node_to_schema( - get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool, + get_validation_method: Callable[..., ValidationMethodType], + nlu: bool, + core: bool, ): # training schema1 = _get_example_schema() @@ -344,7 +356,9 @@ def test_validate_after_replacing_something_in_schema( @pytest.mark.parametrize("nlu, core", [(True, False), (False, True), (True, True)]) def test_validate_after_adding_adding_default_parameter( - get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool, + get_validation_method: Callable[..., ValidationMethodType], + nlu: bool, + core: bool, ): # create a schema and rely on rasa to fill in defaults later schema1 = _get_example_schema() @@ -513,7 +527,9 @@ def 
test_validate_with_other_version( @pytest.mark.parametrize("nlu, core", [(True, False), (False, True), (True, True)]) def test_validate_with_finetuning_fails_without_training( - get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool, + get_validation_method: Callable[..., ValidationMethodType], + nlu: bool, + core: bool, ): validate = get_validation_method(finetuning=True, load=False, nlu=nlu, core=core) with pytest.raises(InvalidConfigException): diff --git a/tests/nlu/classifiers/test_fallback_classifier.py b/tests/nlu/classifiers/test_fallback_classifier.py index a3e80538b9ac..e93567c1879a 100644 --- a/tests/nlu/classifiers/test_fallback_classifier.py +++ b/tests/nlu/classifiers/test_fallback_classifier.py @@ -185,7 +185,8 @@ def test_not_predict_fallback_intent( def test_defaults( - default_model_storage: ModelStorage, default_execution_context: ExecutionContext, + default_model_storage: ModelStorage, + default_execution_context: ExecutionContext, ): classifier = create_fallback_classifier( {}, default_model_storage, default_execution_context diff --git a/tests/nlu/classifiers/test_keyword_classifier.py b/tests/nlu/classifiers/test_keyword_classifier.py index b2df98b30808..5599af383da6 100644 --- a/tests/nlu/classifiers/test_keyword_classifier.py +++ b/tests/nlu/classifiers/test_keyword_classifier.py @@ -22,7 +22,8 @@ def training_data(nlu_as_json_path: Text): @pytest.fixture() def default_keyword_intent_classifier( - default_model_storage: ModelStorage, default_execution_context: ExecutionContext, + default_model_storage: ModelStorage, + default_execution_context: ExecutionContext, ): return KeywordIntentClassifier.create( KeywordIntentClassifier.get_default_config(), @@ -42,12 +43,18 @@ def test_persist_and_load( default_execution_context: ExecutionContext, ): classifier = KeywordIntentClassifier.create( - config, default_model_storage, Resource("keyword"), default_execution_context, + config, + default_model_storage, + Resource("keyword"), + default_execution_context, ) classifier.train(training_data) loaded_classifier = KeywordIntentClassifier.load( - config, default_model_storage, Resource("keyword"), default_execution_context, + config, + default_model_storage, + Resource("keyword"), + default_execution_context, ) predicted = copy.copy(training_data) @@ -85,7 +92,9 @@ def test_classification( assert m.get("intent").get("name", "NOT_CLASSIFIED") == intent -def test_valid_data(default_keyword_intent_classifier: KeywordIntentClassifier,): +def test_valid_data( + default_keyword_intent_classifier: KeywordIntentClassifier, +): json_data = { "rasa_nlu_data": { "common_examples": [ @@ -106,7 +115,9 @@ def test_valid_data(default_keyword_intent_classifier: KeywordIntentClassifier,) @pytest.mark.filterwarnings("ignore:Keyword.* of keywords:UserWarning") -def test_identical_data(default_keyword_intent_classifier: KeywordIntentClassifier,): +def test_identical_data( + default_keyword_intent_classifier: KeywordIntentClassifier, +): json_data = { "rasa_nlu_data": { "common_examples": [ @@ -128,7 +139,9 @@ def test_identical_data(default_keyword_intent_classifier: KeywordIntentClassifi @pytest.mark.filterwarnings("ignore:Keyword.* of keywords:UserWarning") -def test_ambiguous_data(default_keyword_intent_classifier: KeywordIntentClassifier,): +def test_ambiguous_data( + default_keyword_intent_classifier: KeywordIntentClassifier, +): json_data = { "rasa_nlu_data": { "common_examples": [ diff --git a/tests/nlu/classifiers/test_sklearn_classifier.py 
b/tests/nlu/classifiers/test_sklearn_classifier.py index 23bf14293c59..40ed87a7b937 100644 --- a/tests/nlu/classifiers/test_sklearn_classifier.py +++ b/tests/nlu/classifiers/test_sklearn_classifier.py @@ -23,7 +23,8 @@ def training_data(nlu_data_path: Text) -> TrainingData: @pytest.fixture() def default_sklearn_intent_classifier( - default_model_storage: ModelStorage, default_execution_context: ExecutionContext, + default_model_storage: ModelStorage, + default_execution_context: ExecutionContext, ): return SklearnIntentClassifier.create( SklearnIntentClassifier.get_default_config(), @@ -47,7 +48,10 @@ def test_persist_and_load( ) training_data, loaded_pipeline = train_and_preprocess( - pipeline=[{"component": SpacyTokenizer}, {"component": SpacyFeaturizer},], + pipeline=[ + {"component": SpacyTokenizer}, + {"component": SpacyFeaturizer}, + ], training_data=training_data, ) default_sklearn_intent_classifier.train(training_data) diff --git a/tests/nlu/conftest.py b/tests/nlu/conftest.py index 0a305e93589c..fe61c97f293d 100644 --- a/tests/nlu/conftest.py +++ b/tests/nlu/conftest.py @@ -36,7 +36,8 @@ def train_and_preprocess( default_model_storage: ModelStorage, ) -> Callable[..., Tuple[TrainingData, List[GraphComponent]]]: def inner( - pipeline: List[Dict[Text, Any]], training_data: Union[Text, TrainingData], + pipeline: List[Dict[Text, Any]], + training_data: Union[Text, TrainingData], ) -> Tuple[TrainingData, List[GraphComponent]]: if isinstance(training_data, str): @@ -73,8 +74,13 @@ def create_component( @pytest.fixture() -def process_message(default_model_storage: ModelStorage,) -> Callable[..., Message]: - def inner(loaded_pipeline: List[GraphComponent], message: Message,) -> Message: +def process_message( + default_model_storage: ModelStorage, +) -> Callable[..., Message]: + def inner( + loaded_pipeline: List[GraphComponent], + message: Message, + ) -> Message: for component in loaded_pipeline: component.process([message]) @@ -92,7 +98,8 @@ def spacy_tokenizer() -> SpacyTokenizer: @pytest.fixture() def spacy_featurizer() -> SpacyFeaturizer: return SpacyFeaturizer( - SpacyFeaturizer.get_default_config(), name="SpacyFeaturizer", + SpacyFeaturizer.get_default_config(), + name="SpacyFeaturizer", ) diff --git a/tests/nlu/extractors/test_duckling_entity_extractor.py b/tests/nlu/extractors/test_duckling_entity_extractor.py index babd487191f0..1c3c4ba0954a 100644 --- a/tests/nlu/extractors/test_duckling_entity_extractor.py +++ b/tests/nlu/extractors/test_duckling_entity_extractor.py @@ -13,7 +13,8 @@ @pytest.fixture() def create_duckling( - default_model_storage: ModelStorage, default_execution_context: ExecutionContext, + default_model_storage: ModelStorage, + default_execution_context: ExecutionContext, ) -> Callable[[Dict[Text, Any]], DucklingEntityExtractor]: def inner(config: Dict[Text, Any]) -> DucklingEntityExtractor: return DucklingEntityExtractor.create( diff --git a/tests/nlu/extractors/test_extractor.py b/tests/nlu/extractors/test_extractor.py index 888e9bdeecae..b15e0389dd44 100644 --- a/tests/nlu/extractors/test_extractor.py +++ b/tests/nlu/extractors/test_extractor.py @@ -254,7 +254,9 @@ def test_convert_tags_to_entities( "address", ], }, - {"entity": [1.0, 1.0, 1.0, 1.0, 0.98, 0.78, 1.0, 0.89, 1.0, 1.0, 1.0],}, + { + "entity": [1.0, 1.0, 1.0, 1.0, 0.98, 0.78, 1.0, 0.89, 1.0, 1.0, 1.0], + }, [ { "entity": "address", diff --git a/tests/nlu/extractors/test_mitie_entity_extractor.py b/tests/nlu/extractors/test_mitie_entity_extractor.py index ec533064f0f0..c95b0e73bed5 100644 --- 
a/tests/nlu/extractors/test_mitie_entity_extractor.py +++ b/tests/nlu/extractors/test_mitie_entity_extractor.py @@ -61,7 +61,10 @@ def inner(config: Dict[Text, Any], load: bool = False) -> MitieEntityExtractor: model_storage=default_model_storage, execution_context=default_execution_context, resource=Resource("MitieEntityExtractor"), - config={**MitieEntityExtractor.get_default_config(), **config,}, + config={ + **MitieEntityExtractor.get_default_config(), + **config, + }, ) return inner diff --git a/tests/nlu/extractors/test_regex_entity_extractor.py b/tests/nlu/extractors/test_regex_entity_extractor.py index 94423c4cad38..8b2ba632aceb 100644 --- a/tests/nlu/extractors/test_regex_entity_extractor.py +++ b/tests/nlu/extractors/test_regex_entity_extractor.py @@ -116,7 +116,12 @@ def inner(config: Dict[Text, Any], load: bool = False) -> RegexEntityExtractor: ( {"use_word_boundaries": False}, "北京和上海都是大城市。", - [{"name": "city", "elements": ["北京", "上海", "广州", "深圳", "杭州"],}], + [ + { + "name": "city", + "elements": ["北京", "上海", "广州", "深圳", "杭州"], + } + ], [ { ENTITY_ATTRIBUTE_TYPE: "city", @@ -139,7 +144,10 @@ def inner(config: Dict[Text, Any], load: bool = False) -> RegexEntityExtractor: {"use_word_boundaries": False}, "小明正要去北京拜访老李。", [ - {"name": "city", "elements": ["北京", "上海", "广州", "深圳", "杭州"],}, + { + "name": "city", + "elements": ["北京", "上海", "广州", "深圳", "杭州"], + }, {"name": "person", "elements": ["小明", "小红", "小王", "小李"]}, ], [ @@ -164,7 +172,10 @@ def inner(config: Dict[Text, Any], load: bool = False) -> RegexEntityExtractor: {"use_word_boundaries": False}, "Rasa 真好用。", [ - {"name": "city", "elements": ["北京", "上海", "广州", "深圳", "杭州"],}, + { + "name": "city", + "elements": ["北京", "上海", "广州", "深圳", "杭州"], + }, {"name": "person", "elements": ["小明", "小红", "小王", "小李"]}, ], [], diff --git a/tests/nlu/featurizers/test_convert_featurizer.py b/tests/nlu/featurizers/test_convert_featurizer.py index e75abf39df7d..e5f9eb733084 100644 --- a/tests/nlu/featurizers/test_convert_featurizer.py +++ b/tests/nlu/featurizers/test_convert_featurizer.py @@ -27,7 +27,8 @@ @pytest.fixture def create_or_load_convert_featurizer( - default_model_storage: ModelStorage, default_execution_context: ExecutionContext, + default_model_storage: ModelStorage, + default_execution_context: ExecutionContext, ) -> Callable[[Dict[Text, Any], bool], ConveRTFeaturizer]: def inner( config: Dict[Text, Any], load: bool = False @@ -54,7 +55,9 @@ def test_convert_featurizer_process( ): monkeypatch.setattr( - ConveRTFeaturizer, "_validate_model_url", lambda _: RESTRICTED_ACCESS_URL, + ConveRTFeaturizer, + "_validate_model_url", + lambda _: RESTRICTED_ACCESS_URL, ) component_config = { FEATURIZER_CLASS_ALIAS: "alias", @@ -95,7 +98,9 @@ def test_convert_featurizer_train( ): monkeypatch.setattr( - ConveRTFeaturizer, "_validate_model_url", lambda _: None, + ConveRTFeaturizer, + "_validate_model_url", + lambda _: None, ) component_config = { FEATURIZER_CLASS_ALIAS: "alias", @@ -166,7 +171,9 @@ def test_convert_featurizer_tokens_to_text( ): monkeypatch.setattr( - ConveRTFeaturizer, "_validate_model_url", lambda _: None, + ConveRTFeaturizer, + "_validate_model_url", + lambda _: None, ) component_config = { FEATURIZER_CLASS_ALIAS: "alias", @@ -209,7 +216,9 @@ def test_convert_featurizer_token_edge_cases( ): monkeypatch.setattr( - ConveRTFeaturizer, "_validate_model_url", lambda _: None, + ConveRTFeaturizer, + "_validate_model_url", + lambda _: None, ) component_config = { FEATURIZER_CLASS_ALIAS: "alias", @@ -240,7 +249,9 @@ def 
test_convert_featurizer_number_of_sub_tokens( ): monkeypatch.setattr( - ConveRTFeaturizer, "_validate_model_url", lambda _: None, + ConveRTFeaturizer, + "_validate_model_url", + lambda _: None, ) component_config = { FEATURIZER_CLASS_ALIAS: "alias", diff --git a/tests/nlu/featurizers/test_count_vectors_featurizer.py b/tests/nlu/featurizers/test_count_vectors_featurizer.py index d5e189f3b4c3..325a47b25f47 100644 --- a/tests/nlu/featurizers/test_count_vectors_featurizer.py +++ b/tests/nlu/featurizers/test_count_vectors_featurizer.py @@ -240,7 +240,11 @@ def test_count_vector_featurizer_shared_vocab( create_featurizer: Callable[..., CountVectorsFeaturizer], whitespace_tokenizer: WhitespaceTokenizer, ): - ftr = create_featurizer({"use_shared_vocab": True,}) + ftr = create_featurizer( + { + "use_shared_vocab": True, + } + ) train_message = Message(data={TEXT: sentence}) # this is needed for a valid training example @@ -323,7 +327,10 @@ def test_count_vector_featurizer_oov_words( whitespace_tokenizer: WhitespaceTokenizer, ): ftr = create_featurizer( - {"OOV_token": "__oov__", "OOV_words": ["oov_word0", "OOV_word1"],} + { + "OOV_token": "__oov__", + "OOV_words": ["oov_word0", "OOV_word1"], + } ) train_message = Message(data={TEXT: sentence}) whitespace_tokenizer.process([train_message]) @@ -397,7 +404,13 @@ def test_count_vector_featurizer_char( create_featurizer: Callable[..., CountVectorsFeaturizer], whitespace_tokenizer: WhitespaceTokenizer, ): - ftr = create_featurizer({"min_ngram": 1, "max_ngram": 2, "analyzer": "char",}) + ftr = create_featurizer( + { + "min_ngram": 1, + "max_ngram": 2, + "analyzer": "char", + } + ) train_message = Message(data={TEXT: sentence}) whitespace_tokenizer.process([train_message]) @@ -621,7 +634,11 @@ def test_count_vector_featurizer_action_attribute_featurization( create_featurizer: Callable[..., CountVectorsFeaturizer], whitespace_tokenizer: WhitespaceTokenizer, ): - ftr = create_featurizer({"token_pattern": r"(?u)\b\w+\b",}) + ftr = create_featurizer( + { + "token_pattern": r"(?u)\b\w+\b", + } + ) train_message = Message(data={TEXT: sentence}) # this is needed for a valid training example @@ -687,7 +704,11 @@ def test_count_vector_featurizer_process_by_attribute( create_featurizer: Callable[..., CountVectorsFeaturizer], whitespace_tokenizer: WhitespaceTokenizer, ): - ftr = create_featurizer({"token_pattern": r"(?u)\b\w+\b",}) + ftr = create_featurizer( + { + "token_pattern": r"(?u)\b\w+\b", + } + ) # add a second example that has some response, so that the vocabulary for # response exists @@ -773,7 +794,10 @@ def test_cvf_incremental_training( @pytest.mark.parametrize( "initial_train_text, additional_train_text, " "use_shared_vocab", - [("am I the coolest person?", "no", True), ("rasa rasa", "sara sara", False),], + [ + ("am I the coolest person?", "no", True), + ("rasa rasa", "sara sara", False), + ], ) def test_use_shared_vocab_exception( initial_train_text: Text, diff --git a/tests/nlu/featurizers/test_featurizer.py b/tests/nlu/featurizers/test_featurizer.py index a94b5f7c71bb..3b81a7106498 100644 --- a/tests/nlu/featurizers/test_featurizer.py +++ b/tests/nlu/featurizers/test_featurizer.py @@ -39,8 +39,18 @@ np.array([[0.0, 0.0, 0.0, 0.0]]), ), # "max" - special cases to be aware of - ("max", np.array([[-1.0], [0.0]]), False, np.array([[0.0]]),), - ("max", np.array([[-1.0], [0.0]]), True, np.array([[-1.0]]),), + ( + "max", + np.array([[-1.0], [0.0]]), + False, + np.array([[0.0]]), + ), + ( + "max", + np.array([[-1.0], [0.0]]), + True, + np.array([[-1.0]]), + 
), ], ) def test_calculate_cls_vector(pooling, features, only_non_zero_vectors, expected): diff --git a/tests/nlu/featurizers/test_lexical_syntactic_featurizer.py b/tests/nlu/featurizers/test_lexical_syntactic_featurizer.py index 48f586694d76..0ada31f1d4c1 100644 --- a/tests/nlu/featurizers/test_lexical_syntactic_featurizer.py +++ b/tests/nlu/featurizers/test_lexical_syntactic_featurizer.py @@ -36,7 +36,10 @@ def create_lexical_syntactic_featurizer( ) -> Callable[[Dict[Text, Any]], LexicalSyntacticFeaturizer]: def inner(config: Dict[Text, Any]): return LexicalSyntacticFeaturizer.create( - config={**LexicalSyntacticFeaturizer.get_default_config(), **config,}, + config={ + **LexicalSyntacticFeaturizer.get_default_config(), + **config, + }, model_storage=default_model_storage, execution_context=default_execution_context, resource=resource_lexical_syntactic_featurizer, @@ -52,7 +55,11 @@ def inner(config: Dict[Text, Any]): ( "hello goodbye hello", None, - [["BOS", "upper"], ["BOS", "EOS", "prefix2", "digit"], ["EOS", "low"],], + [ + ["BOS", "upper"], + ["BOS", "EOS", "prefix2", "digit"], + ["EOS", "low"], + ], [ [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0], [0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0], @@ -63,7 +70,11 @@ def inner(config: Dict[Text, Any]): ( "a 1", None, - [["BOS", "upper"], ["BOS", "EOS", "prefix2", "digit"], ["EOS", "low"],], + [ + ["BOS", "upper"], + ["BOS", "EOS", "prefix2", "digit"], + ["EOS", "low"], + ], [ [0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0], @@ -249,7 +260,10 @@ def test_create_train_load_and_process( featurizer.train(TrainingData([message])) loaded_featurizer = LexicalSyntacticFeaturizer.load( - config={**LexicalSyntacticFeaturizer.get_default_config(), **config,}, + config={ + **LexicalSyntacticFeaturizer.get_default_config(), + **config, + }, model_storage=default_model_storage, execution_context=default_execution_context, resource=resource_lexical_syntactic_featurizer, @@ -287,7 +301,13 @@ def test_validate_config(config: Dict[Text, Any], raises: bool): @pytest.mark.parametrize( "sentence, feature_config, expected_features", - [("The sun is shining", [["pos", "pos2"]], np.ones(shape=(4, 2)),),], + [ + ( + "The sun is shining", + [["pos", "pos2"]], + np.ones(shape=(4, 2)), + ), + ], ) def test_warn_if_part_of_speech_features_cannot_be_computed( create_lexical_syntactic_featurizer: Callable[ diff --git a/tests/nlu/featurizers/test_mitie_featurizer.py b/tests/nlu/featurizers/test_mitie_featurizer.py index 421067a19673..7d84409a7514 100644 --- a/tests/nlu/featurizers/test_mitie_featurizer.py +++ b/tests/nlu/featurizers/test_mitie_featurizer.py @@ -43,7 +43,10 @@ def create( ) -> Callable[[Dict[Text, Any]], MitieFeaturizer]: def inner(config: Dict[Text, Any]): return MitieFeaturizer.create( - config={**MitieFeaturizer.get_default_config(), **config,}, + config={ + **MitieFeaturizer.get_default_config(), + **config, + }, model_storage=default_model_storage, execution_context=default_execution_context, resource=resource, diff --git a/tests/nlu/featurizers/test_regex_featurizer.py b/tests/nlu/featurizers/test_regex_featurizer.py index 384de53a76d4..dd1351a5cef5 100644 --- a/tests/nlu/featurizers/test_regex_featurizer.py +++ b/tests/nlu/featurizers/test_regex_featurizer.py @@ -73,13 +73,23 @@ def inner( ), ( "blah balh random eh", - [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0],], + [ + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + 
[0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + ], [0.0, 0.0, 0.0], [], ), ( "a 1 digit number", - [[0.0, 0.0, 0.0], [1.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0],], + [ + [0.0, 0.0, 0.0], + [1.0, 0.0, 1.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + ], [1.0, 0.0, 1.0], [1, 1], ), @@ -176,8 +186,14 @@ def test_lookup_tables_without_use_word_boundaries( from rasa.nlu.tokenizers.tokenizer import Token lookups = [ - {"name": "cites", "elements": ["北京", "上海", "广州", "深圳", "杭州"],}, - {"name": "dates", "elements": ["昨天", "今天", "明天", "后天"],}, + { + "name": "cites", + "elements": ["北京", "上海", "广州", "深圳", "杭州"], + }, + { + "name": "dates", + "elements": ["昨天", "今天", "明天", "后天"], + }, ] ftr = create_featurizer({"use_word_boundaries": False}) training_data = TrainingData() @@ -221,7 +237,14 @@ def test_lookup_tables_without_use_word_boundaries( ), ( "Is burrito my favorite food?", - [[0.0, 0.0], [0.0, 1.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0],], + [ + [0.0, 0.0], + [0.0, 1.0], + [0.0, 0.0], + [0.0, 0.0], + [0.0, 0.0], + [0.0, 0.0], + ], [0.0, 1.0], [1.0], ), @@ -394,7 +417,8 @@ def test_regex_featurizer_case_sensitive( {"pattern": "[0-1]+", "name": "binary", "usage": "intent"}, ] ftr = create_featurizer( - {"case_sensitive": case_sensitive}, known_patterns=patterns, + {"case_sensitive": case_sensitive}, + known_patterns=patterns, ) # adds tokens to the message diff --git a/tests/nlu/featurizers/test_spacy_featurizer.py b/tests/nlu/featurizers/test_spacy_featurizer.py index b45071b13183..8f6c35b53bc2 100644 --- a/tests/nlu/featurizers/test_spacy_featurizer.py +++ b/tests/nlu/featurizers/test_spacy_featurizer.py @@ -14,7 +14,8 @@ def create_spacy_featurizer(config: Dict[Text, Any]) -> SpacyFeaturizer: return SpacyFeaturizer( - {**SpacyFeaturizer.get_default_config(), **config}, "spacy_featurizer", + {**SpacyFeaturizer.get_default_config(), **config}, + "spacy_featurizer", ) diff --git a/tests/nlu/test_evaluation.py b/tests/nlu/test_evaluation.py index fe5892c54597..ca6b8502ec23 100644 --- a/tests/nlu/test_evaluation.py +++ b/tests/nlu/test_evaluation.py @@ -220,7 +220,10 @@ def test_determine_token_labels_with_extractors(): label = determine_token_labels( CH_correct_segmentation[0], [CH_correct_entity, CH_wrong_entity], - {SpacyEntityExtractor.__name__, MitieEntityExtractor.__name__,}, + { + SpacyEntityExtractor.__name__, + MitieEntityExtractor.__name__, + }, ) assert label == "direction" @@ -397,7 +400,9 @@ async def test_run_evaluation_with_regex_message(mood_agent: Agent, tmp_path: Pa async def test_eval_data( - tmp_path: Path, project: Text, trained_rasa_model: Text, + tmp_path: Path, + project: Text, + trained_rasa_model: Text, ): config_path = os.path.join(project, "config.yml") data_importer = TrainingDataImporter.load_nlu_importer_from_config( @@ -1017,7 +1022,13 @@ async def test_nlu_comparison( monkeypatch.setattr( sys.modules["rasa.nlu.test"], "get_eval_data", - AsyncMock(return_value=(1, None, (None,),)), + AsyncMock( + return_value=( + 1, + None, + (None,), + ) + ), ) monkeypatch.setattr( sys.modules["rasa.nlu.test"], @@ -1221,7 +1232,9 @@ def __init__(self, prediction_to_return: Dict[Text, Any]) -> None: self.prediction = prediction_to_return async def parse_message( - self, message: UserMessage, only_output_properties: bool = True, + self, + message: UserMessage, + only_output_properties: bool = True, ) -> Dict[Text, Any]: return self.prediction diff --git a/tests/nlu/test_spacy_utils.py b/tests/nlu/test_spacy_utils.py index 5e6157791c76..052cedbbf633 100644 --- 
a/tests/nlu/test_spacy_utils.py +++ b/tests/nlu/test_spacy_utils.py @@ -13,7 +13,8 @@ def create_spacy_nlp_component( - model_name: Text = "en_core_web_md", case_sensitive: Optional[bool] = None, + model_name: Text = "en_core_web_md", + case_sensitive: Optional[bool] = None, ) -> SpacyNLP: component = SpacyNLP.create( {"model": model_name, "case_sensitive": case_sensitive}, None, None, None diff --git a/tests/nlu/tokenizers/test_jieba_tokenizer.py b/tests/nlu/tokenizers/test_jieba_tokenizer.py index afd9e631cd48..cc5279048bf0 100644 --- a/tests/nlu/tokenizers/test_jieba_tokenizer.py +++ b/tests/nlu/tokenizers/test_jieba_tokenizer.py @@ -20,7 +20,10 @@ def create_jieba(config: Optional[Dict] = None) -> JiebaTokenizer: config = config if config else {} return JiebaTokenizer.create( - {**JiebaTokenizer.get_default_config(), **config}, None, None, None, + {**JiebaTokenizer.get_default_config(), **config}, + None, + None, + None, ) diff --git a/tests/nlu/tokenizers/test_tokenizer.py b/tests/nlu/tokenizers/test_tokenizer.py index 473fa42c3d28..563293373988 100644 --- a/tests/nlu/tokenizers/test_tokenizer.py +++ b/tests/nlu/tokenizers/test_tokenizer.py @@ -20,7 +20,10 @@ def create_whitespace_tokenizer( config: Optional[Dict[Text, Any]] = None ) -> WhitespaceTokenizer: return WhitespaceTokenizer( - {**WhitespaceTokenizer.get_default_config(), **(config if config else {}),} + { + **WhitespaceTokenizer.get_default_config(), + **(config if config else {}), + } ) diff --git a/tests/nlu/tokenizers/test_whitespace_tokenizer.py b/tests/nlu/tokenizers/test_whitespace_tokenizer.py index b9716e0994c8..0cfd6ee5983b 100644 --- a/tests/nlu/tokenizers/test_whitespace_tokenizer.py +++ b/tests/nlu/tokenizers/test_whitespace_tokenizer.py @@ -9,9 +9,13 @@ from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer -def create_whitespace_tokenizer(config: Optional[Dict] = None,) -> WhitespaceTokenizer: +def create_whitespace_tokenizer( + config: Optional[Dict] = None, +) -> WhitespaceTokenizer: config = config if config else {} - return WhitespaceTokenizer({**WhitespaceTokenizer.get_default_config(), **config},) + return WhitespaceTokenizer( + {**WhitespaceTokenizer.get_default_config(), **config}, + ) @pytest.mark.parametrize( diff --git a/tests/nlu/utils/test_mitie_utils.py b/tests/nlu/utils/test_mitie_utils.py index 5cb24ade65ce..fe9d5556ac54 100644 --- a/tests/nlu/utils/test_mitie_utils.py +++ b/tests/nlu/utils/test_mitie_utils.py @@ -54,7 +54,8 @@ def test_provide_different_path( def test_invalid_path( - default_model_storage: ModelStorage, default_execution_context: ExecutionContext, + default_model_storage: ModelStorage, + default_execution_context: ExecutionContext, ): with pytest.raises(RasaException): MitieNLP.create( diff --git a/tests/shared/core/test_constants.py b/tests/shared/core/test_constants.py index 3396309ebe37..5c3578c55534 100644 --- a/tests/shared/core/test_constants.py +++ b/tests/shared/core/test_constants.py @@ -12,7 +12,10 @@ @pytest.mark.parametrize( "name_in_constant, policy_class", - [(POLICY_NAME_RULE, RulePolicy), (CLASSIFIER_NAME_FALLBACK, FallbackClassifier),], + [ + (POLICY_NAME_RULE, RulePolicy), + (CLASSIFIER_NAME_FALLBACK, FallbackClassifier), + ], ) def test_policy_names(name_in_constant: Text, policy_class: Type): assert name_in_constant == policy_class.__name__ @@ -20,7 +23,9 @@ def test_policy_names(name_in_constant: Text, policy_class: Type): @pytest.mark.parametrize( "name_in_constant, classifier_class", - [(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),], + 
[ + (CLASSIFIER_NAME_FALLBACK, FallbackClassifier), + ], ) def test_classifier_names(name_in_constant: Text, classifier_class: Type): assert name_in_constant == classifier_class.__name__ diff --git a/tests/shared/core/test_domain.py b/tests/shared/core/test_domain.py index 377836df25eb..4acc8da3b851 100644 --- a/tests/shared/core/test_domain.py +++ b/tests/shared/core/test_domain.py @@ -1302,20 +1302,27 @@ def test_form_invalid_required_slots_raises(): KEY_SLOTS: { "my_slot": { "type": "text", - "mappings": [{"type": "from_entity", "intent": "greet"},], + "mappings": [ + {"type": "from_entity", "intent": "greet"}, + ], } } }, { KEY_SLOTS: { - "my_slot": {"type": "text", "mappings": [{"type": "from_intent"}],} + "my_slot": { + "type": "text", + "mappings": [{"type": "from_intent"}], + } } }, { KEY_SLOTS: { "my_slot": { "type": "text", - "mappings": [{"type": "from_intent", "value": None},], + "mappings": [ + {"type": "from_intent", "value": None}, + ], } } }, @@ -1331,7 +1338,9 @@ def test_form_invalid_required_slots_raises(): KEY_SLOTS: { "my_slot": { "type": "text", - "mappings": [{"type": "from_trigger_intent", "value": None},], + "mappings": [ + {"type": "from_trigger_intent", "value": None}, + ], } } }, diff --git a/tests/shared/core/test_events.py b/tests/shared/core/test_events.py index 060b9b8d38c8..d2393090ead4 100644 --- a/tests/shared/core/test_events.py +++ b/tests/shared/core/test_events.py @@ -181,7 +181,9 @@ def test_json_parse_action_executed_with_no_hide_rule(): "timestamp": None, } deserialised: ActionExecuted = Event.from_parameters(evt) - expected = ActionExecuted("action_listen",) + expected = ActionExecuted( + "action_listen", + ) assert deserialised == expected assert deserialised.hide_rule_turn == expected.hide_rule_turn @@ -371,19 +373,27 @@ def test_md_format_message_using_short_entity_syntax(): def test_md_format_message_using_short_entity_syntax_no_start_end(): formatted = format_message( - "hello", intent="location", entities=[{"entity": "city", "value": "Berlin"}], + "hello", + intent="location", + entities=[{"entity": "city", "value": "Berlin"}], ) assert formatted == "hello" def test_md_format_message_no_text(): - formatted = format_message("", intent="location", entities=[],) + formatted = format_message( + "", + intent="location", + entities=[], + ) assert formatted == "" def test_md_format_message_using_short_entity_syntax_no_start_end_or_text(): formatted = format_message( - "", intent="location", entities=[{"entity": "city", "value": "Berlin"}], + "", + intent="location", + entities=[{"entity": "city", "value": "Berlin"}], ) assert formatted == "" @@ -415,7 +425,11 @@ def test_md_format_message_using_long_entity_syntax_no_start_end(): intent="location", entities=[ {"start": 10, "end": 16, "entity": "city", "value": "Berlin"}, - {"entity": "country", "value": "Germany", "role": "destination",}, + { + "entity": "country", + "value": "Germany", + "role": "destination", + }, ], ) assert formatted == "I am from [Berlin](city)." 
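The reflowed hunks in these test files all follow a single rule: black's "magic trailing comma", introduced in black 20.8b0 and picked up here by the move from 19.10b0 to 21.7b0 later in this series. A pre-existing trailing comma inside a bracket pair now forces black to keep one element per line rather than packing the arguments onto one line. As a hedged illustration (it assumes `black==21.7b0` is importable; the `format_message` source string mirrors the hunk above and is not executed, only parsed):

```python
# Minimal sketch: feed the pre-upgrade layout to black 21.7b0 and observe
# the magic-trailing-comma explosion. black.format_str only parses the
# string, so format_message does not need to be defined.
import black

src = 'formatted = format_message("", intent="location", entities=[],)\n'
result = black.format_str(src, mode=black.Mode(line_length=88))
print(result)
# formatted = format_message(
#     "",
#     intent="location",
#     entities=[],
# )
```

Removing that final comma (`entities=[])`) would instead let black keep the call on one line, which is why only calls that already carried a trailing comma were exploded throughout this patch.
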
@@ -556,15 +570,25 @@ def test_split_events( True, ), # providing a single `action_listen` is not a session start - ([ActionExecuted(ACTION_LISTEN_NAME, timestamp=3)], False,), + ( + [ActionExecuted(ACTION_LISTEN_NAME, timestamp=3)], + False, + ), # providing a single `action_session_start` is not a session start - ([ActionExecuted(ACTION_SESSION_START_NAME)], False,), + ( + [ActionExecuted(ACTION_SESSION_START_NAME)], + False, + ), # providing no events is not a session start - ([], False,), + ( + [], + False, + ), ], ) def test_events_begin_with_session_start( - test_events: List[Event], begin_with_session_start: bool, + test_events: List[Event], + begin_with_session_start: bool, ): assert ( rasa.shared.core.events.do_events_begin_with_session_start(test_events) @@ -638,7 +662,8 @@ def test_print_end_to_end_events(end_to_end_event: Event): ], ) def test_event_executed_comparison( - events: List[Event], comparison_result: bool, + events: List[Event], + comparison_result: bool, ): result = all(event == events[0] for event in events) assert result == comparison_result @@ -692,7 +717,10 @@ def test_event_executed_comparison( UserUttered( text="hello", parse_data={ - "intent": {"name": "greet", "confidence": 0.9604260921478271,}, + "intent": { + "name": "greet", + "confidence": 0.9604260921478271, + }, "entities": [ {"entity": "city", "value": "London"}, {"entity": "count", "value": 1}, @@ -701,14 +729,38 @@ def test_event_executed_comparison( "message_id": "3f4c04602a4947098c574b107d3ccc50", "metadata": {}, "intent_ranking": [ - {"name": "greet", "confidence": 0.9604260921478271,}, - {"name": "goodbye", "confidence": 0.01835782080888748,}, - {"name": "deny", "confidence": 0.011255578137934208,}, - {"name": "bot_challenge", "confidence": 0.004019865766167641,}, - {"name": "affirm", "confidence": 0.002524246694520116,}, - {"name": "mood_great", "confidence": 0.002214624546468258,}, - {"name": "chitchat", "confidence": 0.0009614597074687481,}, - {"name": "mood_unhappy", "confidence": 0.00024030178610701114,}, + { + "name": "greet", + "confidence": 0.9604260921478271, + }, + { + "name": "goodbye", + "confidence": 0.01835782080888748, + }, + { + "name": "deny", + "confidence": 0.011255578137934208, + }, + { + "name": "bot_challenge", + "confidence": 0.004019865766167641, + }, + { + "name": "affirm", + "confidence": 0.002524246694520116, + }, + { + "name": "mood_great", + "confidence": 0.002214624546468258, + }, + { + "name": "chitchat", + "confidence": 0.0009614597074687481, + }, + { + "name": "mood_unhappy", + "confidence": 0.00024030178610701114, + }, ], "response_selector": { "all_retrieval_intents": [], @@ -771,7 +823,8 @@ def test_event_subclasses_are_tested(event_class: Type[Event]): @pytest.mark.parametrize( - "event", tested_events, + "event", + tested_events, ) def test_event_fingerprint_uniqueness(event: Event): f1 = event.fingerprint() diff --git a/tests/shared/core/test_slot_mappings.py b/tests/shared/core/test_slot_mappings.py index e82447fd4c88..e18cd0e8cca5 100644 --- a/tests/shared/core/test_slot_mappings.py +++ b/tests/shared/core/test_slot_mappings.py @@ -30,7 +30,10 @@ def test_slot_mapping_intent_is_desired(domain: Domain): tracker = DialogueStateTracker("sender_id_test", slots=domain.slots) event1 = UserUttered( text="I'd like to book a restaurant for 2 people.", - intent={"name": "request_restaurant", "confidence": 0.9604260921478271,}, + intent={ + "name": "request_restaurant", + "confidence": 0.9604260921478271, + }, entities=[{"entity": "number", "value": 2}], ) 
tracker.update(event1, domain) diff --git a/tests/shared/core/test_slots.py b/tests/shared/core/test_slots.py index 98c918e4326a..66d8c2b4b9d0 100644 --- a/tests/shared/core/test_slots.py +++ b/tests/shared/core/test_slots.py @@ -267,7 +267,9 @@ def value_feature_pair(self, request: SubRequest) -> Tuple[Any, List[float]]: @pytest.mark.parametrize("value", ["cat", ["cat"]]) def test_apply_single_item_to_slot( - self, value: Any, mappings: List[Dict[Text, Any]], + self, + value: Any, + mappings: List[Dict[Text, Any]], ): slot = self.create_slot(mappings=mappings, influence_conversation=False) tracker = DialogueStateTracker.from_events("sender", evts=[], slots=[slot]) @@ -375,7 +377,8 @@ def value_feature_pair(self, request: SubRequest) -> Tuple[Any, List[float]]: return request.param def test_exception_if_featurized( - self, mappings: List[Dict[Text, Any]], + self, + mappings: List[Dict[Text, Any]], ): with pytest.raises(InvalidSlotConfigError): AnySlot("⛔️", mappings=mappings, influence_conversation=True) diff --git a/tests/shared/core/training_data/story_reader/test_yaml_story_reader.py b/tests/shared/core/training_data/story_reader/test_yaml_story_reader.py index 7065e70b1aed..be4b37c89bf6 100644 --- a/tests/shared/core/training_data/story_reader/test_yaml_story_reader.py +++ b/tests/shared/core/training_data/story_reader/test_yaml_story_reader.py @@ -744,7 +744,9 @@ def test_read_story_file_with_cycles(domain: Domain): def test_generate_training_data_with_cycles(domain: Domain): featurizer = MaxHistoryTrackerFeaturizer(SingleStateFeaturizer(), max_history=4) training_trackers = training.load_data( - "data/test_yaml_stories/stories_with_cycle.yml", domain, augmentation_factor=0, + "data/test_yaml_stories/stories_with_cycle.yml", + domain, + augmentation_factor=0, ) _, label_ids, _ = featurizer.featurize_trackers( @@ -1088,7 +1090,9 @@ async def test_unpack_regex_message_has_correct_entity_start_and_end(): slot_1 = {entity: "Core"} text = f"/greet{json.dumps(slot_1)}" - message = Message(data={TEXT: text},) + message = Message( + data={TEXT: text}, + ) domain = Domain( intents=["greet"], diff --git a/tests/shared/core/training_data/test_visualization.py b/tests/shared/core/training_data/test_visualization.py index 569398494ad5..1529ba3be952 100644 --- a/tests/shared/core/training_data/test_visualization.py +++ b/tests/shared/core/training_data/test_visualization.py @@ -183,7 +183,11 @@ def test_story_visualization_with_merging(domain: Domain): "data/test_yaml_stories/stories.yml", domain ) generated_graph = visualization.visualize_stories( - story_steps, domain, output_file=None, max_history=3, should_merge_nodes=True, + story_steps, + domain, + output_file=None, + max_history=3, + should_merge_nodes=True, ) assert 15 < len(generated_graph.nodes()) < 33 diff --git a/tests/shared/importers/test_multi_project.py b/tests/shared/importers/test_multi_project.py index c61a0847f456..8c6afb2c0e03 100644 --- a/tests/shared/importers/test_multi_project.py +++ b/tests/shared/importers/test_multi_project.py @@ -81,7 +81,9 @@ def test_load_from_none(input_dict: Dict, tmp_path: Path): assert actual._imports == list() -def test_load_if_subproject_is_more_specific_than_parent(tmp_path: Path,): +def test_load_if_subproject_is_more_specific_than_parent( + tmp_path: Path, +): config_path = str(tmp_path / "config.yml") utils.dump_obj_as_yaml_to_file(tmp_path / "config.yml", {}) @@ -209,7 +211,9 @@ def test_not_importing_not_relevant_additional_files(tmp_path: Path): assert not 
selector.is_imported(str(not_relevant_file2)) -def test_not_importing_e2e_conversation_tests_in_project(tmp_path: Path,): +def test_not_importing_e2e_conversation_tests_in_project( + tmp_path: Path, +): config = {"imports": ["bots/Bot A"]} config_path = str(tmp_path / "config.yml") utils.dump_obj_as_yaml_to_file(config_path, config) diff --git a/tests/shared/nlu/training_data/test_message.py b/tests/shared/nlu/training_data/test_message.py index 589334e1c4e1..60366ad983e2 100644 --- a/tests/shared/nlu/training_data/test_message.py +++ b/tests/shared/nlu/training_data/test_message.py @@ -372,7 +372,8 @@ def test_features_present( ], ) def test_is_core_or_domain_message( - message: Message, result: bool, + message: Message, + result: bool, ): assert result == message.is_core_or_domain_message() @@ -395,7 +396,12 @@ def test_message_fingerprint_includes_data_and_features( assert fp1 != fp2 message.add_features( - Features(scipy.sparse.csr_matrix([1, 1, 0]), FEATURE_TYPE_SEQUENCE, TEXT, "c2",) + Features( + scipy.sparse.csr_matrix([1, 1, 0]), + FEATURE_TYPE_SEQUENCE, + TEXT, + "c2", + ) ) fp3 = message.fingerprint() diff --git a/tests/shared/test_data.py b/tests/shared/test_data.py index 2eae62d37a39..762243aa5ff8 100644 --- a/tests/shared/test_data.py +++ b/tests/shared/test_data.py @@ -119,7 +119,12 @@ def test_get_core_nlu_files(project): "data/examples/dialogflow/package.json", ], ), - ("luis", ["data/examples/luis/demo-restaurants_v7.json",],), + ( + "luis", + [ + "data/examples/luis/demo-restaurants_v7.json", + ], + ), ( "rasa", [ diff --git a/tests/shared/utils/schemas/test_events.py b/tests/shared/utils/schemas/test_events.py index a865ace4b310..5e3c7fecea10 100644 --- a/tests/shared/utils/schemas/test_events.py +++ b/tests/shared/utils/schemas/test_events.py @@ -47,8 +47,12 @@ "chitchat": { "response": { "id": -2223917631873543698, - "responses": [{"text": "I am called Retrieval Bot!"},], - "response_templates": [{"text": "I am called Retrieval Bot!"},], + "responses": [ + {"text": "I am called Retrieval Bot!"}, + ], + "response_templates": [ + {"text": "I am called Retrieval Bot!"}, + ], "confidence": 0.9660752415657043, "intent_response_key": "chitchat/ask_name", "utter_action": "utter_chitchat/ask_name", diff --git a/tests/test_memory_leak.py b/tests/test_memory_leak.py index f734ddfee0c2..8cdd1b44f9e6 100644 --- a/tests/test_memory_leak.py +++ b/tests/test_memory_leak.py @@ -58,7 +58,9 @@ def function_to_profile(self) -> None: @pytest.mark.timeout(720, func_only=True) def test_for_memory_leak( - self, name_for_dumped_files: Text, tmp_path: Path, + self, + name_for_dumped_files: Text, + tmp_path: Path, ) -> None: # Run as separate process to avoid other things affecting the memory usage. # Unfortunately `memory-profiler` doesn't work properly with diff --git a/tests/test_model_training.py b/tests/test_model_training.py index 5856481ab33c..e93136dd6151 100644 --- a/tests/test_model_training.py +++ b/tests/test_model_training.py @@ -82,7 +82,10 @@ def test_train_temp_files( # a new model because nothing has been changed. It also shouldn't create # any temp files. 
rasa.train( - domain_path, stack_config_path, [stories_path, nlu_data_path], output=output, + domain_path, + stack_config_path, + [stories_path, nlu_data_path], + output=output, ) assert count_temp_rasa_files(tempfile.tempdir) == 0 @@ -101,7 +104,10 @@ def test_train_core_temp_files( monkeypatch.setattr(tempfile, "tempdir", tmp_path / "training") rasa.model_training.train_core( - domain_path, stack_config_path, stories_path, output=str(tmp_path / "models"), + domain_path, + stack_config_path, + stories_path, + output=str(tmp_path / "models"), ) assert count_temp_rasa_files(tempfile.tempdir) == 0 @@ -244,7 +250,9 @@ def test_train_nlu_autoconfig( monkeypatch.setattr(GraphTrainer, GraphTrainer.train.__name__, Mock()) # do training rasa.model_training.train_nlu( - stack_config_path, nlu_data_path, output="test_train_nlu_temp_files_models", + stack_config_path, + nlu_data_path, + output="test_train_nlu_temp_files_models", ) mocked_get_configuration.assert_called_once() @@ -480,7 +488,10 @@ def test_training_core_with_e2e_fails_gracefully( e2e_stories_path: Text, ): rasa.model_training.train_core( - domain_path, stack_config_path, e2e_stories_path, output=str(tmp_path), + domain_path, + stack_config_path, + e2e_stories_path, + output=str(tmp_path), ) assert not list(tmp_path.glob("*")) @@ -601,7 +612,9 @@ def test_model_finetuning_core_with_default_epochs( def test_model_finetuning_core_new_domain_label( - tmp_path: Path, monkeypatch: MonkeyPatch, trained_moodbot_path: Text, + tmp_path: Path, + monkeypatch: MonkeyPatch, + trained_moodbot_path: Text, ): (tmp_path / "models").mkdir() output = str(tmp_path / "models") @@ -623,7 +636,8 @@ def test_model_finetuning_core_new_domain_label( def test_model_finetuning_new_domain_label_stops_all_training( - tmp_path: Path, trained_moodbot_path: Text, + tmp_path: Path, + trained_moodbot_path: Text, ): (tmp_path / "models").mkdir() output = str(tmp_path / "models") @@ -690,7 +704,8 @@ def test_model_finetuning_nlu( def test_model_finetuning_nlu_new_label( - tmp_path: Path, trained_nlu_moodbot_path: Text, + tmp_path: Path, + trained_nlu_moodbot_path: Text, ): (tmp_path / "models").mkdir() output = str(tmp_path / "models") @@ -711,7 +726,8 @@ def test_model_finetuning_nlu_new_label( def test_model_finetuning_nlu_new_entity( - tmp_path: Path, trained_nlu_moodbot_path: Text, + tmp_path: Path, + trained_nlu_moodbot_path: Text, ): (tmp_path / "models").mkdir() output = str(tmp_path / "models") @@ -758,7 +774,8 @@ def test_model_finetuning_nlu_new_label_already_in_domain( def test_model_finetuning_nlu_new_label_to_domain_only( - tmp_path: Path, trained_nlu_moodbot_path: Text, + tmp_path: Path, + trained_nlu_moodbot_path: Text, ): (tmp_path / "models").mkdir() output = str(tmp_path / "models") @@ -955,7 +972,10 @@ def test_models_not_retrained_if_only_new_action( def test_invalid_graph_schema( - tmp_path: Path, domain_path: Text, stories_path: Text, nlu_data_path: Text, + tmp_path: Path, + domain_path: Text, + stories_path: Text, + nlu_data_path: Text, ): config = textwrap.dedent( """ @@ -1016,7 +1036,10 @@ def test_fingerprint_changes_if_module_changes( # Train to initialize cache rasa.train( - domain_path, str(new_config_path), [stories_path], output=str(tmp_path), + domain_path, + str(new_config_path), + [stories_path], + output=str(tmp_path), ) # Make sure that the caching works as expected the code didn't change diff --git a/tests/test_server.py b/tests/test_server.py index a46723a27806..030351eb381b 100644 --- a/tests/test_server.py +++ b/tests/test_server.py 
@@ -235,7 +235,9 @@ def run_server(monkeypatch: MonkeyPatch) -> NoReturn: import sys monkeypatch.setattr( - sys.modules["rasa.model_training"], "train", mocked_training_function, + sys.modules["rasa.model_training"], + "train", + mocked_training_function, ) from rasa import __main__ @@ -439,7 +441,9 @@ async def test_parse_without_nlu_model(rasa_app_core: SanicASGITestClient): assert all(prop in rjs for prop in ["entities", "intent", "text"]) -async def test_parse_on_invalid_emulation_mode(rasa_app: SanicASGITestClient,): +async def test_parse_on_invalid_emulation_mode( + rasa_app: SanicASGITestClient, +): _, response = await rasa_app.post( "/model/parse?emulation_mode=ANYTHING", json={"text": "hello"} ) @@ -557,7 +561,8 @@ async def test_train_with_retrieval_events_success( def assert_trained_model( - response_body: bytes, tmp_path_factory: TempPathFactory, + response_body: bytes, + tmp_path_factory: TempPathFactory, ) -> None: # save model to temporary file @@ -573,7 +578,8 @@ def assert_trained_model( async def test_train_with_yaml( - rasa_app: SanicASGITestClient, tmp_path_factory: TempPathFactory, + rasa_app: SanicASGITestClient, + tmp_path_factory: TempPathFactory, ): training_data = """ version: "3.0" @@ -791,7 +797,9 @@ async def test_evaluate_stories_end_to_end( } -async def test_add_message(rasa_app: SanicASGITestClient,): +async def test_add_message( + rasa_app: SanicASGITestClient, +): conversation_id = "test_add_message_test_id" @@ -858,7 +866,8 @@ async def test_evaluate_invalid_intent_model_file(rasa_app: SanicASGITestClient) async def test_evaluate_intent_without_body(rasa_app: SanicASGITestClient): _, response = await rasa_app.post( - "/model/test/intents", headers={"Content-type": rasa.server.YAML_CONTENT_TYPE}, + "/model/test/intents", + headers={"Content-type": rasa.server.YAML_CONTENT_TYPE}, ) assert response.status == HTTPStatus.BAD_REQUEST @@ -1180,12 +1189,15 @@ async def test_predict_invalid_entities_format(rasa_app: SanicASGITestClient): async def test_predict_empty_request_body(rasa_app: SanicASGITestClient): _, response = await rasa_app.post( - "/model/predict", headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE}, + "/model/predict", + headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE}, ) assert response.status == HTTPStatus.BAD_REQUEST -async def test_append_events_empty_request_body(rasa_app: SanicASGITestClient,): +async def test_append_events_empty_request_body( + rasa_app: SanicASGITestClient, +): _, response = await rasa_app.post( "/conversations/testid/tracker/events", headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE}, diff --git a/tests/test_validator.py b/tests/test_validator.py index 95a42d6beec8..4226a2345922 100644 --- a/tests/test_validator.py +++ b/tests/test_validator.py @@ -52,7 +52,8 @@ def test_verify_nlu_with_e2e_story(tmp_path: Path, nlu_data_path: Path): def test_verify_intents_does_not_fail_on_valid_data(nlu_data_path: Text): importer = RasaFileImporter( - domain_path="data/test_moodbot/domain.yml", training_data_paths=[nlu_data_path], + domain_path="data/test_moodbot/domain.yml", + training_data_paths=[nlu_data_path], ) validator = Validator.from_importer(importer) assert validator.verify_intents() @@ -94,7 +95,8 @@ def test_verify_valid_responses_in_rules(nlu_data_path: Text): def test_verify_story_structure(stories_path: Text): importer = RasaFileImporter( - domain_path="data/test_domains/default.yml", training_data_paths=[stories_path], + domain_path="data/test_domains/default.yml", + training_data_paths=[stories_path], ) 
validator = Validator.from_importer(importer) assert validator.verify_story_structure(ignore_warnings=False) @@ -221,7 +223,8 @@ def test_verify_there_is_example_repetition_in_intents(nlu_data_path: Text): # moodbot nlu data already has duplicated example 'good afternoon' # for intents greet and goodbye importer = RasaFileImporter( - domain_path="data/test_moodbot/domain.yml", training_data_paths=[nlu_data_path], + domain_path="data/test_moodbot/domain.yml", + training_data_paths=[nlu_data_path], ) validator = Validator.from_importer(importer) assert not validator.verify_example_repetition_in_intents(False) @@ -269,7 +272,8 @@ def test_verify_logging_message_for_repetition_in_intents(caplog, nlu_data_path: # moodbot nlu data already has duplicated example 'good afternoon' # for intents greet and goodbye importer = RasaFileImporter( - domain_path="data/test_moodbot/domain.yml", training_data_paths=[nlu_data_path], + domain_path="data/test_moodbot/domain.yml", + training_data_paths=[nlu_data_path], ) validator = Validator.from_importer(importer) caplog.clear() # clear caplog to avoid counting earlier debug messages @@ -325,7 +329,8 @@ def test_verify_actions_in_stories_not_in_domain(tmp_path: Path, domain_path: Te ) importer = RasaFileImporter( - domain_path=domain_path, training_data_paths=[story_file_name], + domain_path=domain_path, + training_data_paths=[story_file_name], ) validator = Validator.from_importer(importer) with pytest.warns(UserWarning) as warning: @@ -351,7 +356,8 @@ def test_verify_actions_in_rules_not_in_domain(tmp_path: Path, domain_path: Text """ ) importer = RasaFileImporter( - domain_path=domain_path, training_data_paths=[rules_file_name], + domain_path=domain_path, + training_data_paths=[rules_file_name], ) validator = Validator.from_importer(importer) with pytest.warns(UserWarning) as warning: @@ -446,7 +452,10 @@ def test_valid_stories_rules_actions_in_domain( - action: action_greet """ ) - importer = RasaFileImporter(domain_path=domain, training_data_paths=[file_name],) + importer = RasaFileImporter( + domain_path=domain, + training_data_paths=[file_name], + ) validator = Validator.from_importer(importer) assert validator.verify_actions_in_stories_rules() @@ -476,7 +485,10 @@ def test_valid_stories_rules_default_actions( - action: action_restart """ ) - importer = RasaFileImporter(domain_path=domain, training_data_paths=[file_name],) + importer = RasaFileImporter( + domain_path=domain, + training_data_paths=[file_name], + ) validator = Validator.from_importer(importer) assert validator.verify_actions_in_stories_rules() diff --git a/tests/train.py b/tests/train.py index ea22293a0eed..e82da7f18be3 100644 --- a/tests/train.py +++ b/tests/train.py @@ -62,7 +62,9 @@ def pipelines_for_tests() -> List[Tuple[Text, List[Dict[Text, Any]]]]: ( "en", as_pipeline( - "WhitespaceTokenizer", "LanguageModelFeaturizer", "DIETClassifier", + "WhitespaceTokenizer", + "LanguageModelFeaturizer", + "DIETClassifier", ), ), ("fallback", as_pipeline("KeywordIntentClassifier", "FallbackClassifier")), diff --git a/tests/utils/tensorflow/test_callback.py b/tests/utils/tensorflow/test_callback.py index 433385bf2b49..eca62da8743a 100644 --- a/tests/utils/tensorflow/test_callback.py +++ b/tests/utils/tensorflow/test_callback.py @@ -59,7 +59,8 @@ def test_does_model_improve( @pytest.fixture(scope="module") def trained_ted( - tmp_path_factory: TempPathFactory, moodbot_domain_path: Path, + tmp_path_factory: TempPathFactory, + moodbot_domain_path: Path, ) -> TEDPolicy: training_files = 
"data/test_moodbot/data/stories.yml" domain = Domain.load(moodbot_domain_path) diff --git a/tests/utils/tensorflow/test_layers.py b/tests/utils/tensorflow/test_layers.py index 15bc2a885da3..01a24f84db0c 100644 --- a/tests/utils/tensorflow/test_layers.py +++ b/tests/utils/tensorflow/test_layers.py @@ -29,7 +29,10 @@ def test_dot_product_loss_inner_sim(): - layer = DotProductLoss(0, similarity_type=INNER,) + layer = DotProductLoss( + 0, + similarity_type=INNER, + ) a = tf.constant([[[1.0, 0.0, 2.0]], [[1.0, 0.0, 2.0]]]) b = tf.constant([[[1.0, 0.0, -2.0]], [[1.0, 0.0, -2.0]]]) mask = tf.constant([[1.0, 0.0]]) @@ -243,7 +246,10 @@ def mock_random_indices(*args, **kwargs) -> tf.Tensor: pos_neg_labels.numpy() == np.array( [ - [1, 0,], # l0 is an actual positive example in `batch_labels_embed[0]`, + [ + 1, + 0, + ], # l0 is an actual positive example in `batch_labels_embed[0]`, # whereas l2 is not [ 0, @@ -330,13 +336,19 @@ def mock_random_indices(*args, **kwargs) -> tf.Tensor: pos_neg_labels.numpy() == np.array( [ - [1, 0,], # l0 is an actual positive example in `batch_labels_embed[0]`, + [ + 1, + 0, + ], # l0 is an actual positive example in `batch_labels_embed[0]`, # whereas l2 is not [ 0, 0, ], # Neither l0 nor l1 are positive examples in `batch_labels_embed[1]` - [1, 0,], # l3 is an actual positive example in `batch_labels_embed[2]`, + [ + 1, + 0, + ], # l3 is an actual positive example in `batch_labels_embed[2]`, # whereas l1 is not ] ) @@ -459,7 +471,10 @@ def test_randomly_connected_dense_all_inputs_connected(): def test_dense_for_sparse_get_feature_type( layer_name: Text, expected_feature_type: Union[Text, None] ): - layer = DenseForSparse(name=layer_name, units=10,) + layer = DenseForSparse( + name=layer_name, + units=10, + ) assert layer.get_feature_type() == expected_feature_type @@ -489,5 +504,8 @@ def test_dense_for_sparse_get_feature_type( def test_dense_for_sparse_get_attribute( layer_name: Text, expected_attribute: Union[Text, None] ): - layer = DenseForSparse(name=layer_name, units=10,) + layer = DenseForSparse( + name=layer_name, + units=10, + ) assert layer.get_attribute() == expected_attribute diff --git a/tests/utils/tensorflow/test_model_data_utils.py b/tests/utils/tensorflow/test_model_data_utils.py index 58e264e75d72..3bd62e07856b 100644 --- a/tests/utils/tensorflow/test_model_data_utils.py +++ b/tests/utils/tensorflow/test_model_data_utils.py @@ -258,7 +258,9 @@ def test_convert_training_examples( ) ] output, sparse_feature_sizes = model_data_utils.featurize_training_examples( - [message], attributes=attributes, entity_tag_specs=entity_tag_spec, + [message], + attributes=attributes, + entity_tag_specs=entity_tag_spec, ) assert len(output) == 1 diff --git a/tests/utils/test_common.py b/tests/utils/test_common.py index e7f5d17d169c..37316047c2c4 100644 --- a/tests/utils/test_common.py +++ b/tests/utils/test_common.py @@ -131,7 +131,10 @@ def test_find_unavailable_packages(): [ (Path, "pathlib.Path"), (Agent, "rasa.core.agent.Agent"), - (DIETClassifier, "rasa.nlu.classifiers.diet_classifier.DIETClassifier",), + ( + DIETClassifier, + "rasa.nlu.classifiers.diet_classifier.DIETClassifier", + ), ], ) def test_module_path_from_class(clazz: Type, module_path: Text): diff --git a/tests/utils/test_endpoints.py b/tests/utils/test_endpoints.py index c945e78b1bf3..697266557c6a 100644 --- a/tests/utils/test_endpoints.py +++ b/tests/utils/test_endpoints.py @@ -99,14 +99,18 @@ async def test_endpoint_config_with_cafile(tmp_path: Path): with aioresponses() as mocked: endpoint = 
endpoint_utils.EndpointConfig( - "https://example.com/", cafile=str(cafile), + "https://example.com/", + cafile=str(cafile), ) mocked.post( - "https://example.com/", status=200, + "https://example.com/", + status=200, ) - await endpoint.request("post",) + await endpoint.request( + "post", + ) request = latest_request(mocked, "post", "https://example.com/")[-1] @@ -119,11 +123,14 @@ async def test_endpoint_config_with_non_existent_cafile(tmp_path: Path): cafile = "data/test_endpoints/no_file.pem" endpoint = endpoint_utils.EndpointConfig( - "https://example.com/", cafile=str(cafile), + "https://example.com/", + cafile=str(cafile), ) with pytest.raises(FileNotFoundException): - await endpoint.request("post",) + await endpoint.request( + "post", + ) def test_endpoint_config_default_token_name(): @@ -160,7 +167,9 @@ async def test_request_non_json_response(): @pytest.mark.parametrize( "filename, endpoint_type", - [("data/test_endpoints/example_endpoints.yml", "tracker_store"),], + [ + ("data/test_endpoints/example_endpoints.yml", "tracker_store"), + ], ) def test_read_endpoint_config(filename: Text, endpoint_type: Text): conf = endpoint_utils.read_endpoint_config(filename, endpoint_type) diff --git a/tests/utils/test_io.py b/tests/utils/test_io.py index a0b3ff172b8a..7857c67ed50b 100644 --- a/tests/utils/test_io.py +++ b/tests/utils/test_io.py @@ -104,7 +104,9 @@ def test_directories_are_equal(tmp_path_factory: TempPathFactory): assert rasa.utils.io.are_directories_equal(dir1, dir2) -def test_directories_are_equal_sub_dir(tmp_path_factory: TempPathFactory,): +def test_directories_are_equal_sub_dir( + tmp_path_factory: TempPathFactory, +): dir1 = tmp_path_factory.mktemp("dir1") (dir1 / "dir").mkdir() (dir1 / "dir" / "file.txt").write_text("Hello!") @@ -128,7 +130,9 @@ def test_directories_are_equal_different_file_content( assert not rasa.utils.io.are_directories_equal(dir1, dir2) -def test_directories_are_equal_extra_file(tmp_path_factory: TempPathFactory,): +def test_directories_are_equal_extra_file( + tmp_path_factory: TempPathFactory, +): dir1 = tmp_path_factory.mktemp("dir1") (dir1 / "file.txt").write_text("Hello!") diff --git a/tests/utils/test_plotting.py b/tests/utils/test_plotting.py index 87b14e0e12f1..f2260a4a4eb2 100644 --- a/tests/utils/test_plotting.py +++ b/tests/utils/test_plotting.py @@ -30,7 +30,11 @@ def test_paired_histogram_specification_bins( @pytest.mark.parametrize( - "bad_data", [([[]]), ([[], []]),], + "bad_data", + [ + ([[]]), + ([[], []]), + ], ) def test_paired_histogram_specification_bins_raises(bad_data: List): """`_extract_paired_histogram_specification` raises a ValueError on empty data""" @@ -46,7 +50,11 @@ def test_paired_histogram_specification_bins_raises(bad_data: List): @pytest.mark.parametrize( - "bad_data", [([[]]), ([[], []]),], + "bad_data", + [ + ([[]]), + ([[], []]), + ], ) def test_plot_paired_histogram_warns_on_bad_data(bad_data: List): """Empty data shouldn't raise an error.""" @@ -94,7 +102,11 @@ def test_paired_histogram_specification_histograms( expected_histograms: List[List[float]], ): _, histograms, _, _, = rasa.utils.plotting._extract_paired_histogram_specification( - data, num_bins=num_bins, density=density, x_pad_fraction=0, y_pad_fraction=0, + data, + num_bins=num_bins, + density=density, + x_pad_fraction=0, + y_pad_fraction=0, ) assert np.all(histograms[0] == expected_histograms[0]) assert np.all(histograms[1] == expected_histograms[1]) diff --git a/tests/utils/test_train_utils.py b/tests/utils/test_train_utils.py index 
6527060b30a4..4dc9f5d7a043 100644 --- a/tests/utils/test_train_utils.py +++ b/tests/utils/test_train_utils.py @@ -114,7 +114,8 @@ def test_rank_and_mask( ], ) def test_init_split_entities_config( - split_entities_config: Any, expected_initialized_config: Dict[(str, bool)], + split_entities_config: Any, + expected_initialized_config: Dict[(str, bool)], ): assert ( train_utils.init_split_entities( @@ -242,7 +243,8 @@ def test_update_confidence_type( ], ) def test_tolerance_setting( - component_config: Dict[Text, float], raises_exception: bool, + component_config: Dict[Text, float], + raises_exception: bool, ): if raises_exception: with pytest.raises(InvalidConfigException): From 2bdfd76864f94579a21cb3c5f0208425f1a7c63b Mon Sep 17 00:00:00 2001 From: carlad Date: Wed, 12 Jan 2022 13:57:02 +0100 Subject: [PATCH 5/9] add changelog --- changelog/592.misc.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog/592.misc.md diff --git a/changelog/592.misc.md b/changelog/592.misc.md new file mode 100644 index 000000000000..b8efb1de7a47 --- /dev/null +++ b/changelog/592.misc.md @@ -0,0 +1 @@ +Update `black` version to align with version used by `rasa-sdk`. \ No newline at end of file From 0c7fcde189adcd05c7f99ad070d6e191cdca34f1 Mon Sep 17 00:00:00 2001 From: carlad Date: Wed, 12 Jan 2022 14:11:03 +0100 Subject: [PATCH 6/9] fix qa ignore --- rasa/server.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rasa/server.py b/rasa/server.py index 7d08f6822a9d..2f800237082d 100644 --- a/rasa/server.py +++ b/rasa/server.py @@ -79,8 +79,8 @@ response.HTTPResponse, Coroutine[Any, Any, response.HTTPResponse] ] SanicView = Callable[ - [Arg(Request, "request"), VarArg(), KwArg()], - SanicResponse, # noqa: F821 + [Arg(Request, "request"), VarArg(), KwArg()], # noqa: F821 + SanicResponse, ] From 98e8a3ff032315baf7ab8aa3eda6fa7429ad1b40 Mon Sep 17 00:00:00 2001 From: carlad Date: Wed, 12 Jan 2022 16:39:31 +0100 Subject: [PATCH 7/9] correct version of black in pre-commit config --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d2870c46d47d..5ca78bee5082 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/ambv/black - rev: 21.12b0 + rev: 21.7b0 hooks: - id: black - repo: https://github.com/thlorenz/doctoc From 8436f1054a3c26c337a8c1df92d2cf7f59701fab Mon Sep 17 00:00:00 2001 From: carlad Date: Wed, 12 Jan 2022 18:37:39 +0100 Subject: [PATCH 8/9] config line length --- .pre-commit-config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5ca78bee5082..4943390df90d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,6 +3,7 @@ repos: rev: 21.7b0 hooks: - id: black + args: [--line-length=88] - repo: https://github.com/thlorenz/doctoc rev: v2.1.0 hooks: From a9932b4a003529f24031782c032fa9ef8d040919 Mon Sep 17 00:00:00 2001 From: carlad Date: Fri, 14 Jan 2022 17:32:56 +0100 Subject: [PATCH 9/9] black magic-comma format --- poetry.lock | 764 ++++++++++-------- pyproject.toml | 2 +- rasa/cli/arguments/evaluate.py | 6 +- rasa/cli/arguments/run.py | 2 +- rasa/cli/data.py | 7 +- rasa/cli/evaluate.py | 2 +- rasa/cli/scaffold.py | 2 +- rasa/cli/telemetry.py | 3 +- rasa/cli/test.py | 4 +- rasa/cli/x.py | 4 +- rasa/core/actions/action.py | 10 +- rasa/core/actions/forms.py | 4 +- rasa/core/agent.py | 21 +- rasa/core/brokers/kafka.py | 2 +- 
rasa/core/channels/socketio.py | 8 +- rasa/core/channels/twilio_voice.py | 6 +- rasa/core/evaluation/marker_base.py | 8 +- rasa/core/evaluation/marker_stats.py | 8 +- rasa/core/featurizers/precomputation.py | 2 +- .../featurizers/single_state_featurizer.py | 8 +- rasa/core/featurizers/tracker_featurizers.py | 4 +- rasa/core/http_interpreter.py | 2 +- rasa/core/migrate.py | 16 +- rasa/core/nlg/response.py | 2 +- rasa/core/policies/ensemble.py | 2 +- rasa/core/policies/memoization.py | 14 +- rasa/core/policies/policy.py | 9 +- rasa/core/policies/rule_policy.py | 12 +- rasa/core/policies/ted_policy.py | 25 +- .../core/policies/unexpected_intent_policy.py | 20 +- rasa/core/processor.py | 10 +- rasa/core/run.py | 4 +- rasa/core/test.py | 6 +- rasa/core/tracker_store.py | 10 +- rasa/core/training/__init__.py | 4 +- rasa/core/training/interactive.py | 4 +- rasa/core/training/story_conflict.py | 4 +- rasa/core/utils.py | 12 +- rasa/engine/caching.py | 6 +- rasa/engine/graph.py | 7 +- rasa/engine/recipes/default_components.py | 5 +- rasa/engine/recipes/default_recipe.py | 20 +- rasa/engine/runner/dask.py | 9 +- rasa/engine/storage/local_model_storage.py | 20 +- rasa/engine/storage/storage.py | 2 +- rasa/engine/training/components.py | 6 +- rasa/engine/training/graph_trainer.py | 7 +- rasa/engine/validation.py | 16 +- .../domain_for_core_training_provider.py | 7 +- .../providers/nlu_training_data_provider.py | 4 +- .../providers/story_graph_provider.py | 6 +- .../validators/default_recipe_validator.py | 22 +- .../validators/finetuning_validator.py | 8 +- rasa/model.py | 6 +- rasa/model_testing.py | 11 +- rasa/model_training.py | 16 +- rasa/nlu/classifiers/diet_classifier.py | 4 +- rasa/nlu/classifiers/fallback_classifier.py | 2 +- .../classifiers/keyword_intent_classifier.py | 4 +- .../classifiers/sklearn_intent_classifier.py | 2 +- rasa/nlu/constants.py | 5 +- rasa/nlu/emulators/dialogflow.py | 2 +- rasa/nlu/extractors/entity_synonyms.py | 2 +- rasa/nlu/extractors/mitie_entity_extractor.py | 10 +- rasa/nlu/extractors/regex_entity_extractor.py | 2 +- .../dense_featurizer/convert_featurizer.py | 7 +- .../dense_featurizer/lm_featurizer.py | 9 +- .../dense_featurizer/mitie_featurizer.py | 2 +- .../dense_featurizer/spacy_featurizer.py | 2 +- rasa/nlu/featurizers/featurizer.py | 2 +- .../count_vectors_featurizer.py | 9 +- .../lexical_syntactic_featurizer.py | 20 +- .../sparse_featurizer/regex_featurizer.py | 8 +- rasa/nlu/selectors/response_selector.py | 2 +- rasa/nlu/test.py | 6 +- rasa/nlu/tokenizers/jieba_tokenizer.py | 2 +- rasa/nlu/utils/spacy_utils.py | 2 +- rasa/server.py | 8 +- rasa/shared/core/domain.py | 20 +- rasa/shared/core/events.py | 10 +- rasa/shared/core/generator.py | 5 +- rasa/shared/core/slot_mappings.py | 6 +- rasa/shared/core/slots.py | 9 +- rasa/shared/core/trackers.py | 4 +- rasa/shared/core/training_data/loading.py | 10 +- .../story_reader/story_reader.py | 2 +- .../story_reader/yaml_story_reader.py | 2 +- .../story_writer/yaml_story_writer.py | 13 +- rasa/shared/core/training_data/structures.py | 10 +- .../core/training_data/visualization.py | 2 +- rasa/shared/data.py | 2 +- rasa/shared/importers/importer.py | 10 +- rasa/shared/importers/multi_project.py | 4 +- rasa/shared/importers/rasa.py | 6 +- rasa/shared/importers/utils.py | 2 +- rasa/shared/nlu/constants.py | 5 +- rasa/shared/nlu/training_data/features.py | 8 +- .../nlu/training_data/formats/dialogflow.py | 4 +- rasa/shared/nlu/training_data/util.py | 4 +- rasa/shared/utils/pykwalify_extensions.py | 2 +- 
rasa/shared/utils/schemas/events.py | 14 +- rasa/telemetry.py | 17 +- rasa/utils/common.py | 4 +- rasa/utils/io.py | 11 +- rasa/utils/plotting.py | 2 +- rasa/utils/tensorflow/layers.py | 21 +- rasa/utils/tensorflow/layers_utils.py | 2 +- rasa/utils/tensorflow/model_data.py | 12 +- rasa/utils/tensorflow/models.py | 26 +- rasa/utils/tensorflow/rasa_layers.py | 2 +- rasa/utils/tensorflow/temp_keras_modules.py | 11 +- rasa/utils/train_utils.py | 2 +- rasa/validator.py | 2 +- tests/cli/test_rasa_data.py | 7 +- tests/cli/test_rasa_evaluate_markers.py | 2 +- tests/cli/test_rasa_init.py | 2 +- tests/cli/test_rasa_test.py | 6 +- tests/cli/test_rasa_train.py | 9 +- tests/cli/test_rasa_x.py | 2 +- tests/conftest.py | 8 +- tests/core/actions/test_forms.py | 35 +- tests/core/conftest.py | 2 +- tests/core/evaluation/test_marker.py | 26 +- tests/core/evaluation/test_marker_stats.py | 8 +- tests/core/featurizers/test_precomputation.py | 4 +- .../test_single_state_featurizers.py | 19 +- .../featurizers/test_tracker_featurizer.py | 134 +-- tests/core/nlg/test_response.py | 6 +- tests/core/policies/test_rule_policy.py | 28 +- tests/core/policies/test_ted_policy.py | 43 +- .../policies/test_unexpected_intent_policy.py | 34 +- tests/core/test_actions.py | 58 +- tests/core/test_agent.py | 8 +- tests/core/test_ensemble.py | 4 +- tests/core/test_evaluation.py | 22 +- tests/core/test_migrate.py | 14 +- tests/core/test_policies.py | 38 +- tests/core/test_processor.py | 23 +- tests/core/test_test.py | 8 +- tests/core/test_tracker_stores.py | 12 +- tests/core/test_training.py | 2 +- tests/core/training/test_interactive.py | 12 +- tests/core/training/test_story_conflict.py | 6 +- tests/dialogues.py | 10 +- tests/docs/test_docs_use_base_url.py | 2 +- tests/engine/recipes/test_default_recipe.py | 16 +- tests/engine/runner/test_dask.py | 20 +- .../storage/test_local_model_storage.py | 10 +- tests/engine/storage/test_storage.py | 2 +- tests/engine/test_caching.py | 2 +- tests/engine/test_graph.py | 4 +- tests/engine/test_graph_node.py | 7 +- tests/engine/test_loader.py | 6 +- tests/engine/test_validation.py | 84 +- tests/engine/training/test_fingerprinting.py | 12 +- tests/engine/training/test_graph_trainer.py | 36 +- tests/engine/training/test_hooks.py | 9 +- .../converters/test_nlu_message_converter.py | 8 +- .../providers/test_story_graph_provider.py | 2 +- .../test_default_recipe_validator.py | 46 +- .../validators/test_finetuning_validator.py | 23 +- tests/nlu/classifiers/test_diet_classifier.py | 42 +- .../classifiers/test_fallback_classifier.py | 2 +- .../classifiers/test_keyword_classifier.py | 12 +- .../classifiers/test_regex_message_handler.py | 7 +- .../classifiers/test_sklearn_classifier.py | 4 +- tests/nlu/conftest.py | 10 +- .../test_duckling_entity_extractor.py | 2 +- tests/nlu/extractors/test_extractor.py | 4 +- .../extractors/test_mitie_entity_extractor.py | 2 +- .../extractors/test_regex_entity_extractor.py | 10 +- .../featurizers/test_convert_featurizer.py | 22 +- .../test_count_vectors_featurizer.py | 12 +- tests/nlu/featurizers/test_featurizer.py | 4 +- .../test_lexical_syntactic_featurizer.py | 16 +- tests/nlu/featurizers/test_lm_featurizer.py | 38 +- .../nlu/featurizers/test_mitie_featurizer.py | 2 +- .../nlu/featurizers/test_regex_featurizer.py | 14 +- .../nlu/featurizers/test_spacy_featurizer.py | 2 +- tests/nlu/selectors/test_selectors.py | 30 +- tests/nlu/test_evaluation.py | 10 +- tests/nlu/test_spacy_utils.py | 2 +- tests/nlu/test_train.py | 12 +- tests/nlu/tokenizers/test_jieba_tokenizer.py | 
2 +- tests/nlu/tokenizers/test_tokenizer.py | 2 +- .../tokenizers/test_whitespace_tokenizer.py | 4 +- tests/nlu/utils/test_mitie_utils.py | 2 +- tests/shared/core/test_constants.py | 9 +- tests/shared/core/test_domain.py | 12 +- tests/shared/core/test_events.py | 44 +- tests/shared/core/test_slot_mappings.py | 2 +- tests/shared/core/test_slots.py | 6 +- tests/shared/core/test_trackers.py | 6 +- .../story_reader/test_yaml_story_reader.py | 4 +- .../core/training_data/test_visualization.py | 2 +- tests/shared/importers/test_multi_project.py | 4 +- .../shared/nlu/training_data/test_message.py | 6 +- .../nlu/training_data/test_training_data.py | 16 +- tests/shared/test_data.py | 2 +- tests/shared/utils/schemas/test_events.py | 8 +- tests/test_memory_leak.py | 9 +- tests/test_model_training.py | 26 +- tests/test_server.py | 16 +- tests/test_telemetry.py | 2 +- tests/test_validator.py | 19 +- tests/train.py | 2 +- tests/utils/tensorflow/test_callback.py | 2 +- tests/utils/tensorflow/test_layers.py | 21 +- .../utils/tensorflow/test_model_data_utils.py | 10 +- tests/utils/tensorflow/test_models.py | 21 +- tests/utils/tensorflow/test_rasa_layers.py | 56 +- tests/utils/test_common.py | 2 +- tests/utils/test_endpoints.py | 16 +- tests/utils/test_io.py | 4 +- tests/utils/test_plotting.py | 20 +- tests/utils/test_train_utils.py | 6 +- 216 files changed, 1327 insertions(+), 1765 deletions(-) diff --git a/poetry.lock b/poetry.lock index 7cd403270156..b33d09b7c092 100644 --- a/poetry.lock +++ b/poetry.lock @@ -11,11 +11,11 @@ six = "*" [[package]] name = "aio-pika" -version = "6.8.0" +version = "6.8.1" description = "Wrapper for the aiormq for asyncio and humans." category = "main" optional = false -python-versions = ">3.5.*, <4" +python-versions = ">=3.5, <4" [package.dependencies] aiormq = ">=3.2.3,<4" @@ -53,7 +53,7 @@ speedups = ["aiodns", "brotlipy", "cchardet"] [[package]] name = "aioresponses" -version = "0.7.2" +version = "0.7.3" description = "Mock out requests made by ClientSession from aiohttp package" category = "dev" optional = false @@ -79,7 +79,7 @@ develop = ["aiomisc (>=11.0,<12.0)", "async-generator", "coverage (!=4.3)", "cov [[package]] name = "anyio" -version = "3.4.0" +version = "3.5.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" category = "dev" optional = false @@ -91,7 +91,7 @@ sniffio = ">=1.1" typing-extensions = {version = "*", markers = "python_version < \"3.8\""} [package.extras] -doc = ["sphinx-rtd-theme", "sphinx-autodoc-typehints (>=1.2.0)"] +doc = ["packaging", "sphinx-rtd-theme", "sphinx-autodoc-typehints (>=1.2.0)"] test = ["coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "pytest (>=6.0)", "pytest-mock (>=3.6.1)", "trustme", "contextlib2", "uvloop (<0.15)", "mock (>=4)", "uvloop (>=0.15)"] trio = ["trio (>=0.16)"] @@ -180,7 +180,7 @@ tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (> [[package]] name = "azure-core" -version = "1.21.0" +version = "1.21.1" description = "Microsoft Azure Core Library for Python" category = "dev" optional = false @@ -227,23 +227,27 @@ python-versions = ">=3.6" [[package]] name = "black" -version = "19.10b0" +version = "21.7b0" description = "The uncompromising code formatter." 
category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.6.2" [package.dependencies] appdirs = "*" -attrs = ">=18.1.0" -click = ">=6.5" -pathspec = ">=0.6,<1" -regex = "*" -toml = ">=0.9.4" -typed-ast = ">=1.4.0" +click = ">=7.1.2" +mypy-extensions = ">=0.4.3" +pathspec = ">=0.8.1,<1" +regex = ">=2020.1.8" +tomli = ">=0.2.6,<2.0.0" +typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\""} +typing-extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} [package.extras] -d = ["aiohttp (>=3.3.2)", "aiohttp-cors"] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.6.0)", "aiohttp-cors (>=0.4.0)"] +python2 = ["typed-ast (>=1.4.2)"] +uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "blis" @@ -258,14 +262,14 @@ numpy = ">=1.15.0" [[package]] name = "boto3" -version = "1.20.20" +version = "1.20.35" description = "The AWS SDK for Python" category = "main" optional = false python-versions = ">= 3.6" [package.dependencies] -botocore = ">=1.23.20,<1.24.0" +botocore = ">=1.23.35,<1.24.0" jmespath = ">=0.7.1,<1.0.0" s3transfer = ">=0.5.0,<0.6.0" @@ -274,7 +278,7 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.23.20" +version = "1.23.35" description = "Low-level, data-driven core of boto 3." category = "main" optional = false @@ -404,11 +408,11 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "colorclass" -version = "2.2.0" +version = "2.2.2" description = "Colorful worry-free console applications for Linux, Mac OS X, and Windows." category = "main" optional = false -python-versions = "*" +python-versions = ">=2.6" [[package]] name = "coloredlogs" @@ -528,7 +532,7 @@ python-versions = "*" [[package]] name = "decorator" -version = "5.1.0" +version = "5.1.1" description = "Decorators for Humans" category = "main" optional = false @@ -645,11 +649,11 @@ requests = ">=2.0" [[package]] name = "filelock" -version = "3.4.0" +version = "3.4.2" description = "A platform independent file lock." 
category = "main" optional = true -python-versions = ">=3.6" +python-versions = ">=3.7" [package.extras] docs = ["furo (>=2021.8.17b43)", "sphinx (>=4.1)", "sphinx-autodoc-typehints (>=1.12)"] @@ -714,7 +718,7 @@ python-dateutil = ">=2.7" [[package]] name = "fsspec" -version = "2021.11.1" +version = "2022.1.0" description = "File-system specification" category = "main" optional = false @@ -789,7 +793,7 @@ test = ["betamax (>=0.8.0)", "pytest (>2.3.5)", "betamax-matchers (>=0.1.0)", "u [[package]] name = "gitpython" -version = "3.1.24" +version = "3.1.26" description = "GitPython is a python library used to interact with Git repositories" category = "dev" optional = false @@ -797,11 +801,11 @@ python-versions = ">=3.7" [package.dependencies] gitdb = ">=4.0.1,<5" -typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.10\""} +typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""} [[package]] name = "google-api-core" -version = "2.2.2" +version = "2.4.0" description = "Google API client core library" category = "dev" optional = false @@ -869,7 +873,7 @@ grpc = ["grpcio (>=1.8.2,<2.0dev)"] [[package]] name = "google-cloud-storage" -version = "1.43.0" +version = "1.44.0" description = "Google Cloud Storage API client library" category = "dev" optional = false @@ -923,7 +927,7 @@ requests = ["requests (>=2.18.0,<3.0.0dev)"] [[package]] name = "googleapis-common-protos" -version = "1.53.0" +version = "1.54.0" description = "Common protobufs used in Google APIs" category = "dev" optional = false @@ -948,7 +952,7 @@ docs = ["sphinx"] [[package]] name = "grpcio" -version = "1.42.0" +version = "1.43.0" description = "HTTP/2-based RPC framework" category = "main" optional = false @@ -958,7 +962,7 @@ python-versions = ">=3.6" six = ">=1.5.2" [package.extras] -protobuf = ["grpcio-tools (>=1.42.0)"] +protobuf = ["grpcio-tools (>=1.43.0)"] [[package]] name = "h11" @@ -1050,11 +1054,11 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "importlib-metadata" -version = "4.8.2" +version = "4.10.0" description = "Read metadata from Python packages" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} @@ -1063,7 +1067,7 @@ zipp = ">=0.5" [package.extras] docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] perf = ["ipython"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] [[package]] name = "incremental" @@ -1094,7 +1098,7 @@ python-versions = "*" [[package]] name = "isodate" -version = "0.6.0" +version = "0.6.1" description = "An ISO 8601 date/time/duration parser and formatter" category = "dev" optional = false @@ -1332,7 +1336,7 @@ six = "*" [[package]] name = "moto" -version = "2.2.17" +version = "2.2.20" description = "A library that allows your python tests to easily mock out the boto library" category = "dev" optional = false @@ -1693,11 +1697,11 @@ packaging = ">=20.3,<21.0" [[package]] name = "pillow" -version = 
"8.4.0" +version = "9.0.0" description = "Python Imaging Library (Fork)" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [[package]] name = "pluggy" @@ -1740,7 +1744,7 @@ wcwidth = "*" [[package]] name = "protobuf" -version = "3.19.1" +version = "3.19.3" description = "Protocol Buffers" category = "main" optional = false @@ -1748,7 +1752,7 @@ python-versions = ">=3.5" [[package]] name = "psutil" -version = "5.8.0" +version = "5.9.0" description = "Cross-platform lib for process and system monitoring in Python." category = "dev" optional = false @@ -1759,7 +1763,7 @@ test = ["ipaddress", "mock", "unittest2", "enum34", "pywin32", "wmi"] [[package]] name = "psycopg2-binary" -version = "2.9.2" +version = "2.9.3" description = "psycopg2 - Python-PostgreSQL Database Adapter" category = "main" optional = false @@ -2035,11 +2039,11 @@ testing = ["fields", "hunter", "process-tests", "six", "pytest-xdist", "virtuale [[package]] name = "pytest-forked" -version = "1.3.0" +version = "1.4.0" description = "run tests in isolated forked subprocesses" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.6" [package.dependencies] py = "*" @@ -2079,7 +2083,7 @@ pytest = ">=3.6.0" [[package]] name = "pytest-xdist" -version = "2.4.0" +version = "2.5.0" description = "pytest xdist plugin for distributed testing and loop-on-failing modes" category = "dev" optional = false @@ -2087,7 +2091,7 @@ python-versions = ">=3.6" [package.dependencies] execnet = ">=1.1" -pytest = ">=6.0.0" +pytest = ">=6.2.0" pytest-forked = "*" [package.extras] @@ -2116,7 +2120,7 @@ six = ">=1.5" [[package]] name = "python-engineio" -version = "4.3.0" +version = "4.3.1" description = "Engine.IO server and client for Python" category = "main" optional = false @@ -2128,7 +2132,7 @@ client = ["requests (>=2.21.0)", "websocket-client (>=0.54.0)"] [[package]] name = "python-socketio" -version = "5.5.0" +version = "5.5.1" description = "Socket.IO server and client for Python" category = "main" optional = false @@ -2354,7 +2358,7 @@ crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] [[package]] name = "sacremoses" -version = "0.0.46" +version = "0.0.47" description = "SacreMoses" category = "main" optional = true @@ -2419,14 +2423,14 @@ docs = ["sphinx"] [[package]] name = "sanic-plugin-toolkit" -version = "1.2.0" +version = "1.2.1" description = "The all-in-one toolkit for creating powerful Sanic Plugins" category = "main" optional = false python-versions = ">=3.7,<4.0" [package.dependencies] -sanic = "*" +sanic = ">=21.3.1,<21.12.0" [[package]] name = "sanic-routing" @@ -2611,7 +2615,7 @@ python-versions = "*" [[package]] name = "spacy" -version = "3.2.0" +version = "3.2.1" description = "Industrial-strength Natural Language Processing (NLP) in Python" category = "main" optional = true @@ -2682,7 +2686,7 @@ wasabi = ">=0.8.1,<1.1.0" [[package]] name = "sqlalchemy" -version = "1.4.27" +version = "1.4.29" description = "Database Abstraction Library" category = "main" optional = false @@ -2786,7 +2790,7 @@ python-versions = ">=3.6" [[package]] name = "tensorboard-plugin-wit" -version = "1.8.0" +version = "1.8.1" description = "What-If Tool TensorBoard plugin." category = "main" optional = false @@ -2907,11 +2911,11 @@ python-versions = "*" [[package]] name = "terminaltables" -version = "3.1.0" +version = "3.1.10" description = "Generate simple tables in terminals from a nested list of strings." 
category = "main" optional = false -python-versions = "*" +python-versions = ">=2.6" [[package]] name = "thinc" @@ -2979,6 +2983,14 @@ category = "dev" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +[[package]] +name = "tomli" +version = "1.2.3" +description = "A lil' TOML parser" +category = "dev" +optional = false +python-versions = ">=3.6" + [[package]] name = "toolz" version = "0.11.2" @@ -3077,7 +3089,7 @@ python-versions = "*" [[package]] name = "typeguard" -version = "2.13.2" +version = "2.13.3" description = "Run-time type checker for Python" category = "main" optional = false @@ -3122,7 +3134,7 @@ python-versions = "*" [[package]] name = "types-pytz" -version = "2021.3.1" +version = "2021.3.4" description = "Typing stubs for pytz" category = "dev" optional = false @@ -3130,20 +3142,31 @@ python-versions = "*" [[package]] name = "types-requests" -version = "2.26.1" +version = "2.27.7" description = "Typing stubs for requests" category = "dev" optional = false python-versions = "*" +[package.dependencies] +types-urllib3 = "<1.27" + [[package]] name = "types-setuptools" -version = "57.4.4" +version = "57.4.7" description = "Typing stubs for setuptools" category = "dev" optional = false python-versions = "*" +[[package]] +name = "types-urllib3" +version = "1.26.7" +description = "Typing stubs for urllib3" +category = "dev" +optional = false +python-versions = "*" + [[package]] name = "typing-extensions" version = "3.7.4.3" @@ -3192,7 +3215,7 @@ python-versions = ">=3.6" [[package]] name = "urllib3" -version = "1.26.7" +version = "1.26.8" description = "HTTP library with thread-safe connection pooling, file post, and more." category = "main" optional = false @@ -3302,15 +3325,15 @@ typing-extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} [[package]] name = "zipp" -version = "3.6.0" +version = "3.7.0" description = "Backport of pathlib-compatible object wrapper for zip files" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.extras] docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] [extras] full = ["spacy", "transformers", "jieba"] @@ -3322,7 +3345,7 @@ transformers = ["transformers"] [metadata] lock-version = "1.1" python-versions = ">=3.7,<3.9" -content-hash = "8cddaa2a91c9f3f1b5914a022b0edfe1c50ffcaca113bc203f9161feb718378b" +content-hash = "2592f3a5728e38357f64e255ffbdd746e9fe453fd81e2736b330284c6b8b89b2" [metadata.files] absl-py = [ @@ -3330,8 +3353,8 @@ absl-py = [ {file = "absl_py-0.13.0-py3-none-any.whl", hash = "sha256:62bd4e248ddb19d81aec8f9446b407ff37c8175c2ba88266a7afa9b4ce4a333b"}, ] aio-pika = [ - {file = "aio-pika-6.8.0.tar.gz", hash = "sha256:1d4305a5f78af3857310b4fe48348cdcf6c097e0e275ea88c2cd08570531a369"}, - {file = "aio_pika-6.8.0-py3-none-any.whl", hash = "sha256:e69afef8695f47c5d107bbdba21bdb845d5c249acb3be53ef5c2d497b02657c0"}, + {file = "aio-pika-6.8.1.tar.gz", hash = "sha256:c2b2b46949a34252ff0e64c3bc208eef1893e5791b51aeefabf1676788d56b66"}, + {file = "aio_pika-6.8.1-py3-none-any.whl", hash = "sha256:059ab8ecc03d73997f64ed28df7269105984232174d0e6406389c4e8ed30941c"}, ] 
aiofiles = [ {file = "aiofiles-0.8.0-py3-none-any.whl", hash = "sha256:7a973fc22b29e9962d0897805ace5856e6a566ab1f0c8e5c91ff6c866519c937"}, @@ -3377,16 +3400,16 @@ aiohttp = [ {file = "aiohttp-3.7.4.tar.gz", hash = "sha256:5d84ecc73141d0a0d61ece0742bb7ff5751b0657dab8405f899d3ceb104cc7de"}, ] aioresponses = [ - {file = "aioresponses-0.7.2-py2.py3-none-any.whl", hash = "sha256:2f8ff624543066eb465b0238de68d29231e8488f41dc4b5a9dae190982cdae50"}, - {file = "aioresponses-0.7.2.tar.gz", hash = "sha256:82e495d118b74896aa5b4d47e17effb5e2cc783e510ae395ceade5e87cabe89a"}, + {file = "aioresponses-0.7.3-py2.py3-none-any.whl", hash = "sha256:7b1897169062c92fa87d6ecc503ac566ac87fbfacb2504f8ca81c8035a2eb068"}, + {file = "aioresponses-0.7.3.tar.gz", hash = "sha256:2c64ed5710ee8cb4e958c569184dad12f4c9cd5939135cb38f88c6a8261cceb3"}, ] aiormq = [ {file = "aiormq-3.3.1-py3-none-any.whl", hash = "sha256:e584dac13a242589aaf42470fd3006cb0dc5aed6506cbd20357c7ec8bbe4a89e"}, {file = "aiormq-3.3.1.tar.gz", hash = "sha256:8218dd9f7198d6e7935855468326bbacf0089f926c70baa8dd92944cb2496573"}, ] anyio = [ - {file = "anyio-3.4.0-py3-none-any.whl", hash = "sha256:2855a9423524abcdd652d942f8932fda1735210f77a6b392eafd9ff34d3fe020"}, - {file = "anyio-3.4.0.tar.gz", hash = "sha256:24adc69309fb5779bc1e06158e143e0b6d2c56b302a3ac3de3083c705a6ed39d"}, + {file = "anyio-3.5.0-py3-none-any.whl", hash = "sha256:b5fa16c5ff93fa1046f2eeb5bbff2dad4d3514d6cda61d02816dba34fa8c3c2e"}, + {file = "anyio-3.5.0.tar.gz", hash = "sha256:a0aeffe2fb1fdf374a8e4b471444f0f3ac4fb9f5a5b542b48824475e0042a5a6"}, ] appdirs = [ {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, @@ -3417,8 +3440,8 @@ attrs = [ {file = "attrs-21.2.0.tar.gz", hash = "sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb"}, ] azure-core = [ - {file = "azure-core-1.21.0.zip", hash = "sha256:84c58cb1194c34bb6c4b4f41dcf0430c11d316b84a0b978b28879577c422c5b6"}, - {file = "azure_core-1.21.0-py2.py3-none-any.whl", hash = "sha256:875419a1d2a373ed596ab517871a53b1006a5322d091d3c0ae8df3f92b82bbfa"}, + {file = "azure-core-1.21.1.zip", hash = "sha256:88d2db5cf9a135a7287dc45fdde6b96f9ca62c9567512a3bb3e20e322ce7deb2"}, + {file = "azure_core-1.21.1-py2.py3-none-any.whl", hash = "sha256:3d70e9ec64de92dfae330c15bc69085caceb2d83813ef6c01cc45326f2a4be83"}, ] azure-storage-blob = [ {file = "azure-storage-blob-12.8.1.zip", hash = "sha256:eb37b50ddfb6e558b29f6c8c03b0666514e55d6170bf4624e7261a3af93c6401"}, @@ -3433,11 +3456,11 @@ bidict = [ {file = "bidict-0.21.4.tar.gz", hash = "sha256:42c84ffbe6f8de898af6073b4be9ea7ccedcd78d3474aa844c54e49d5a079f6f"}, ] black = [ - {file = "black-19.10b0-py36-none-any.whl", hash = "sha256:1b30e59be925fafc1ee4565e5e08abef6b03fe455102883820fe5ee2e4734e0b"}, - {file = "black-19.10b0.tar.gz", hash = "sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539"}, + {file = "black-21.7b0-py3-none-any.whl", hash = "sha256:1c7aa6ada8ee864db745b22790a32f94b2795c253a75d6d9b5e439ff10d23116"}, + {file = "black-21.7b0.tar.gz", hash = "sha256:c8373c6491de9362e39271630b65b964607bc5c79c83783547d76c839b3aa219"}, ] blis = [ - {file = "blis-0.7.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:98eba77b1e1fde7813bc0453ab78b6ae2067f5bc0fe9e3abc671b2895cfecf33"}, + {file = "blis-0.7.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5812a7c04561ae7332cf730f57d9f82cbd12c5f86a5bfad66ee244e51d06266d"}, {file = 
"blis-0.7.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eecfce3d8fce61dede7b0ae0dffa461c22072437b6cde85587db0c1aa75b450"}, {file = "blis-0.7.5-cp310-cp310-win_amd64.whl", hash = "sha256:0e476931f0d5703a21c77e7f69b8ebdeeea493fc7858a86f627ac2b376a12c8d"}, {file = "blis-0.7.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:5966ddf3bce84aa7bb09ce4ca059309602fa63280a5d5e5365bb2a294bd5a138"}, @@ -3455,12 +3478,12 @@ blis = [ {file = "blis-0.7.5.tar.gz", hash = "sha256:833e01e9eaff4c01aa6e049bbc1e6acb9eca6ee513d7b35b5bf135d49705ad33"}, ] boto3 = [ - {file = "boto3-1.20.20-py3-none-any.whl", hash = "sha256:6c173ffaf0604e34d6865edf7a9a71e1b3e79bd441b8b465ca4b2d44f840806d"}, - {file = "boto3-1.20.20.tar.gz", hash = "sha256:2c5377b6ab74eeccccd16f0f21537ede87b05c8322b0ccc852a68f36ea6c16c9"}, + {file = "boto3-1.20.35-py3-none-any.whl", hash = "sha256:e0247a901ba3b7ec51f232921764b7ad14f458ed91038a71df9da732e12bbbe1"}, + {file = "boto3-1.20.35.tar.gz", hash = "sha256:42dd9fcb9e033ab19c9dfaeaba745ef9d2db6efe4e9f1e1f547b3e3e0b1f4a82"}, ] botocore = [ - {file = "botocore-1.23.20-py3-none-any.whl", hash = "sha256:98275e47c941cada6507089ecfe91e420972209b1deeceaf55a89ea50d046347"}, - {file = "botocore-1.23.20.tar.gz", hash = "sha256:22e1c7b4b2b8b11d7001ca5ef2b41bda9a8be46fb3cb994a2948462666ac5ef1"}, + {file = "botocore-1.23.35-py3-none-any.whl", hash = "sha256:4cbd668a28e489c8d1909f621684f070309b3ae990667094b344e6ea72337795"}, + {file = "botocore-1.23.35.tar.gz", hash = "sha256:5be6ba6c5ea71c256da8a5023bf9c278847c4b90fdb40f2c4c3bdb21ca11ff28"}, ] cachecontrol = [ {file = "CacheControl-0.12.10-py2.py3-none-any.whl", hash = "sha256:b0d43d8f71948ef5ebdee5fe236b86c6ffc7799370453dccb0e894c20dfa487c"}, @@ -3558,7 +3581,8 @@ colorama = [ {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"}, ] colorclass = [ - {file = "colorclass-2.2.0.tar.gz", hash = "sha256:b05c2a348dfc1aff2d502527d78a5b7b7e2f85da94a96c5081210d8e9ee8e18b"}, + {file = "colorclass-2.2.2-py2.py3-none-any.whl", hash = "sha256:6f10c273a0ef7a1150b1120b6095cbdd68e5cf36dfd5d0fc957a2500bbf99a55"}, + {file = "colorclass-2.2.2.tar.gz", hash = "sha256:6d4fe287766166a98ca7bc6f6312daf04a0481b1eda43e7173484051c0ab4366"}, ] coloredlogs = [ {file = "coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934"}, @@ -3647,7 +3671,7 @@ cycler = [ {file = "cycler-0.11.0.tar.gz", hash = "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"}, ] cymem = [ - {file = "cymem-2.0.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2b4e27e739f09f16c7c0190f962ffe60dab39cb6a229d5c13e274d16f46a17e8"}, + {file = "cymem-2.0.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:700540b68e96a7056d0691d467df2bbaaf0934a3e6fe2383669998cbee19580a"}, {file = "cymem-2.0.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:971cf0a8437dfb4185c3049c086e463612fe849efadc0f5cc153fc81c501da7d"}, {file = "cymem-2.0.6-cp310-cp310-win_amd64.whl", hash = "sha256:6b0d1a6b0a1296f31fa9e4b7ae5ea49394084ecc883b1ae6fec4844403c43468"}, {file = "cymem-2.0.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b8e1c18bb00800425576710468299153caad20c64ddb6819d40a6a34e21ee21c"}, @@ -3673,8 +3697,8 @@ dataclasses = [ {file = "dataclasses-0.6.tar.gz", hash = "sha256:6988bd2b895eef432d562370bb707d540f32f7360ab13da45340101bc2307d84"}, ] decorator = [ - {file = "decorator-5.1.0-py3-none-any.whl", hash = 
"sha256:7b12e7c3c6ab203a29e157335e9122cb03de9ab7264b137594103fd4a683b374"}, - {file = "decorator-5.1.0.tar.gz", hash = "sha256:e59913af105b9860aa2c8d3272d9de5a56a4e608db9a2f167a8480b323d529a7"}, + {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, + {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, ] deprecated = [ {file = "Deprecated-1.2.13-py2.py3-none-any.whl", hash = "sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d"}, @@ -3727,8 +3751,8 @@ fbmessenger = [ {file = "fbmessenger-6.0.0.tar.gz", hash = "sha256:6e42c4588a4c942547be228886278bbc7a084e0b34799c7e6ebd786129f021e6"}, ] filelock = [ - {file = "filelock-3.4.0-py3-none-any.whl", hash = "sha256:2e139a228bcf56dd8b2274a65174d005c4a6b68540ee0bdbb92c76f43f29f7e8"}, - {file = "filelock-3.4.0.tar.gz", hash = "sha256:93d512b32a23baf4cac44ffd72ccf70732aeff7b8050fcaf6d3ec406d954baf4"}, + {file = "filelock-3.4.2-py3-none-any.whl", hash = "sha256:cf0fc6a2f8d26bd900f19bf33915ca70ba4dd8c56903eeb14e1e7a2fd7590146"}, + {file = "filelock-3.4.2.tar.gz", hash = "sha256:38b4f4c989f9d06d44524df1b24bd19e167d851f19b50bf3e3559952dddc5b80"}, ] fire = [ {file = "fire-0.4.0.tar.gz", hash = "sha256:c5e2b8763699d1142393a46d0e3e790c5eb2f0706082df8f647878842c216a62"}, @@ -3750,8 +3774,8 @@ freezegun = [ {file = "freezegun-1.1.0.tar.gz", hash = "sha256:177f9dd59861d871e27a484c3332f35a6e3f5d14626f2bf91be37891f18927f3"}, ] fsspec = [ - {file = "fsspec-2021.11.1-py3-none-any.whl", hash = "sha256:bcb136caa37e1470dd8314a7d3917cb9b25dd9da44c10d36df556ab4ef038185"}, - {file = "fsspec-2021.11.1.tar.gz", hash = "sha256:03683e606651d5e4bd9180525d57477bd5430e5dc68d2e459835dc14cecc3dd4"}, + {file = "fsspec-2022.1.0-py3-none-any.whl", hash = "sha256:256e2be44e62430c9ca8dac2e480384b00a3c52aef4e2b0b7204163fdc861d37"}, + {file = "fsspec-2022.1.0.tar.gz", hash = "sha256:0bdd519bbf4d8c9a1d893a50b5ebacc89acd0e1fe0045d2f7b0e0c1af5990edc"}, ] future = [ {file = "future-0.18.2.tar.gz", hash = "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"}, @@ -3769,12 +3793,12 @@ gitdb = [ {file = "github3.py-1.3.0.tar.gz", hash = "sha256:15a115c18f7bfcf934dfef7ab103844eb9f620c586bad65967708926da47cbda"}, ] gitpython = [ - {file = "GitPython-3.1.24-py3-none-any.whl", hash = "sha256:dc0a7f2f697657acc8d7f89033e8b1ea94dd90356b2983bca89dc8d2ab3cc647"}, - {file = "GitPython-3.1.24.tar.gz", hash = "sha256:df83fdf5e684fef7c6ee2c02fc68a5ceb7e7e759d08b694088d0cacb4eba59e5"}, + {file = "GitPython-3.1.26-py3-none-any.whl", hash = "sha256:26ac35c212d1f7b16036361ca5cff3ec66e11753a0d677fb6c48fa4e1a9dd8d6"}, + {file = "GitPython-3.1.26.tar.gz", hash = "sha256:fc8868f63a2e6d268fb25f481995ba185a85a66fcad126f039323ff6635669ee"}, ] google-api-core = [ - {file = "google-api-core-2.2.2.tar.gz", hash = "sha256:97349cc18c2bb2415f64f1353a80273a289a61294ce3eb2f7ce682d251bdd997"}, - {file = "google_api_core-2.2.2-py2.py3-none-any.whl", hash = "sha256:e7853735d4f51f4212d6bf9750620d76fc0106c0f271be0c3f43b73501c7ddf9"}, + {file = "google-api-core-2.4.0.tar.gz", hash = "sha256:ba8787b7c61632cd0340f095e1c036bef9426b2594f10afb290ba311ae8cb2cb"}, + {file = "google_api_core-2.4.0-py2.py3-none-any.whl", hash = "sha256:58e2c1171a3d51778bf4e428fbb4bf79cbd05007b4b44deaa80cf73c80eebc0f"}, ] google-auth = [ {file = "google-auth-2.3.3.tar.gz", hash = "sha256:d83570a664c10b97a1dc6f8df87e5fdfff012f48f62be131e449c20dfc32630e"}, @@ -3789,8 +3813,8 @@ 
google-cloud-core = [ {file = "google_cloud_core-2.2.1-py2.py3-none-any.whl", hash = "sha256:ab6cee07791afe4e210807ceeab749da6a076ab16d496ac734bf7e6ffea27486"}, ] google-cloud-storage = [ - {file = "google-cloud-storage-1.43.0.tar.gz", hash = "sha256:f3b4f4be5c8a1b5727a8f7136c94d3bacdd4b7bf11f9553f51ae4c1d876529d3"}, - {file = "google_cloud_storage-1.43.0-py2.py3-none-any.whl", hash = "sha256:bb3e4088054d50616bd57e4b81bb158db804c91faed39279d666e2fd07d2c118"}, + {file = "google-cloud-storage-1.44.0.tar.gz", hash = "sha256:29edbfeedd157d853049302bf5d104055c6f0cb7ef283537da3ce3f730073001"}, + {file = "google_cloud_storage-1.44.0-py2.py3-none-any.whl", hash = "sha256:cd4a223e9c18d771721a85c98a9c01b97d257edddff833ba63b7b1f0b9b4d6e9"}, ] google-crc32c = [ {file = "google-crc32c-1.3.0.tar.gz", hash = "sha256:276de6273eb074a35bc598f8efbc00c7869c5cf2e29c90748fccc8c898c244df"}, @@ -3847,8 +3871,8 @@ google-resumable-media = [ {file = "google_resumable_media-2.1.0-py2.py3-none-any.whl", hash = "sha256:cdc75ea0361e39704dc7df7da59fbd419e73c8bc92eac94d8a020d36baa9944b"}, ] googleapis-common-protos = [ - {file = "googleapis-common-protos-1.53.0.tar.gz", hash = "sha256:a88ee8903aa0a81f6c3cec2d5cf62d3c8aa67c06439b0496b49048fb1854ebf4"}, - {file = "googleapis_common_protos-1.53.0-py2.py3-none-any.whl", hash = "sha256:f6d561ab8fb16b30020b940e2dd01cd80082f4762fa9f3ee670f4419b4b8dbd0"}, + {file = "googleapis-common-protos-1.54.0.tar.gz", hash = "sha256:a4031d6ec6c2b1b6dc3e0be7e10a1bd72fb0b18b07ef9be7b51f2c1004ce2437"}, + {file = "googleapis_common_protos-1.54.0-py2.py3-none-any.whl", hash = "sha256:e54345a2add15dc5e1a7891c27731ff347b4c33765d79b5ed7026a6c0c7cbcae"}, ] greenlet = [ {file = "greenlet-1.1.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:58df5c2a0e293bf665a51f8a100d3e9956febfbf1d9aaf8c0677cf70218910c6"}, @@ -3903,50 +3927,50 @@ greenlet = [ {file = "greenlet-1.1.2.tar.gz", hash = "sha256:e30f5ea4ae2346e62cedde8794a56858a67b878dd79f7df76a0767e356b1744a"}, ] grpcio = [ - {file = "grpcio-1.42.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:6e5eec67909795f7b1ff2bd941bd6c2661ca5217ea9c35003d73314100786f60"}, - {file = "grpcio-1.42.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:8e8cd9909fdd232ecffb954936fd90c935ebe0b5fce36c88813f8247ce54019c"}, - {file = "grpcio-1.42.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:b4d7115ee08a36f3f50a6233bd78280e40847e078d2a5bb39c0ab0db4490d58f"}, - {file = "grpcio-1.42.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b781f412546830be55644f7c48251d30860f4725981862d4a1ea322f80d9cd34"}, - {file = "grpcio-1.42.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e62140c46d8125927c673c72c960cb387c02b2a1a3c6985a8b0a3914d27c0018"}, - {file = "grpcio-1.42.0-cp310-cp310-win32.whl", hash = "sha256:6b69726d7bbb633133c1b0d780b1357aa9b7a7f714fead6470bab1feb8012806"}, - {file = "grpcio-1.42.0-cp310-cp310-win_amd64.whl", hash = "sha256:d6c0b159b38fcc3bbc3331105197c1f58ac0d294eb83910d136a325a85def88f"}, - {file = "grpcio-1.42.0-cp36-cp36m-linux_armv7l.whl", hash = "sha256:53e10d07e541073eb9a84d49ecffb831c3cbb970bcd8cd8de8431e935bf66c2e"}, - {file = "grpcio-1.42.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:7a3c9b8e13365529f9426d4754085e8a9c2ad718a41a46a97e4e30e87bb45eae"}, - {file = "grpcio-1.42.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:66f910b6324ae69625e63db2eb29d833c307cfa36736fe13d2f841656c5f658f"}, - {file = "grpcio-1.42.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = 
"sha256:59163b8d2e0d85f0ecbee52b348f867eec7e0f909177564fb3956363f7e616e5"}, - {file = "grpcio-1.42.0-cp36-cp36m-manylinux_2_17_aarch64.whl", hash = "sha256:d92c1721c7981812d0f42dfc8248b15d3b6a2ea79eb8870776364423de2aa245"}, - {file = "grpcio-1.42.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65720d2bf05e2b78c4bffe372f13c41845bae5658fd3f5dd300c374dd240e5cb"}, - {file = "grpcio-1.42.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f385e40846ff81d1c6dce98dcc192c7988a46540758804c4a2e6da5a0e3e3e05"}, - {file = "grpcio-1.42.0-cp36-cp36m-win32.whl", hash = "sha256:ea3560ffbfe08327024380508190103937fef25e355d2259f8b5c003a0732f55"}, - {file = "grpcio-1.42.0-cp36-cp36m-win_amd64.whl", hash = "sha256:29fc36c99161ff307c8ca438346b2e36f81dac5ecdbabc983d0b255d7913fb19"}, - {file = "grpcio-1.42.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:76b5fa4c6d88f804456e763461cf7a1db38b200669f1ba00c579014ab5aa7965"}, - {file = "grpcio-1.42.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:d1451a8c0c01c5b5fdfeb8f777820cb277fb5d797d972f57a41bb82483c44a79"}, - {file = "grpcio-1.42.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:6655df5f31664bac4cd6c9b52f389fd92cd10025504ad83685038f47e11e29d8"}, - {file = "grpcio-1.42.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:5b9f0c4822e3a52a1663a315752c6bbdbed0ec15a660d3e64137335acbb5b7ce"}, - {file = "grpcio-1.42.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:7742606ac2bc03ed10360f4f630e0cc01dce864fe63557254e9adea21bb51416"}, - {file = "grpcio-1.42.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:603d71de14ab1f1fd1254b69ceda73545943461b1f51f82fda9477503330b6ea"}, - {file = "grpcio-1.42.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d08ce780bbd8d1a442d855bd681ed0f7483c65d2c8ed83701a9ea4f13678411f"}, - {file = "grpcio-1.42.0-cp37-cp37m-win32.whl", hash = "sha256:2aba7f93671ec971c5c70db81633b49a2f974aa09a2d811aede344a32bad1896"}, - {file = "grpcio-1.42.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2956da789d74fc35d2c869b3aa45dbf41c5d862c056ca8b5e35a688347ede809"}, - {file = "grpcio-1.42.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:21aa4a111b3381d3dd982a3df62348713b29f651aa9f6dfbc9415adbfe28d2ba"}, - {file = "grpcio-1.42.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:a6f9ed5320b93c029615b75f6c8caf2c76aa6545d8845f3813908892cfc5f84e"}, - {file = "grpcio-1.42.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:3a13953e12dc40ee247b5fe6ef22b5fac8f040a76b814a11bf9f423e82402f28"}, - {file = "grpcio-1.42.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:f721b42a20d886c03d9b1f461b228cdaf02ccf6c4550e263f7fd3ce3ff19a8f1"}, - {file = "grpcio-1.42.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:e2d9c6690d4c88cd51ee395d7ba5bd1d26d7c37e94cb59e7fd62ff21ecaf891d"}, - {file = "grpcio-1.42.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d7f66eb220898787d7821a7931e35ae2512ed74f79f75adcd7ea2fb3119ca87d"}, - {file = "grpcio-1.42.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2e3f250e5398bf474c6e140df1b67494bf1e31c5277b5bf93841a564cbc22d0"}, - {file = "grpcio-1.42.0-cp38-cp38-win32.whl", hash = "sha256:06d5364e85e0fa50ee68bffd9c93a6aff869a91c68f1fd7ba1b944e063a0ff9f"}, - {file = "grpcio-1.42.0-cp38-cp38-win_amd64.whl", hash = "sha256:d58b3774ee2084c31aad140586a42e18328d9823959ca006a0b85ad7937fe405"}, - {file = "grpcio-1.42.0-cp39-cp39-linux_armv7l.whl", hash = 
"sha256:b74bbac7e039cf23ed0c8edd820c31e90a52a22e28a03d45274a0956addde8d2"}, - {file = "grpcio-1.42.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2b264cf303a22c46f8d455f42425c546ad6ce22f183debb8d64226ddf1e039f4"}, - {file = "grpcio-1.42.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:64f2b3e6474e2ad865478b64f0850d15842acbb2623de5f78a60ceabe00c63e0"}, - {file = "grpcio-1.42.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:bf916ee93ea2fd52b5286ed4e19cbbde5e82754914379ea91dc5748550df3b4e"}, - {file = "grpcio-1.42.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:6ef72f0abdb89fb7c366a99e04823ecae5cda9f762f2234f42fc280447277cd6"}, - {file = "grpcio-1.42.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47ab65be9ba7a0beee94bbe2fb1dd03cb7832db9df4d1f8fae215a16b3edeb5e"}, - {file = "grpcio-1.42.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0209f30741de1875413f40e89bec9c647e7afad4a3549a6a1682c1ee23da68ca"}, - {file = "grpcio-1.42.0-cp39-cp39-win32.whl", hash = "sha256:5441d343602ce10ba48fcb36bb5de197a15a01dc9ee0f71c2a73cd5cd3d7f5ac"}, - {file = "grpcio-1.42.0-cp39-cp39-win_amd64.whl", hash = "sha256:17433f7eb01737240581b33fbc2eae7b7fa6d3429571782580bceaf05ec56cb8"}, - {file = "grpcio-1.42.0.tar.gz", hash = "sha256:4a8f2c7490fe3696e0cdd566e2f099fb91b51bc75446125175c55581c2f7bc11"}, + {file = "grpcio-1.43.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:a4e786a8ee8b30b25d70ee52cda6d1dbba2a8ca2f1208d8e20ed8280774f15c8"}, + {file = "grpcio-1.43.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:af9c3742f6c13575c0d4147a8454da0ff5308c4d9469462ff18402c6416942fe"}, + {file = "grpcio-1.43.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:fdac966699707b5554b815acc272d81e619dd0999f187cd52a61aef075f870ee"}, + {file = "grpcio-1.43.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e463b4aa0a6b31cf2e57c4abc1a1b53531a18a570baeed39d8d7b65deb16b7e"}, + {file = "grpcio-1.43.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f11d05402e0ac3a284443d8a432d3dfc76a6bd3f7b5858cddd75617af2d7bd9b"}, + {file = "grpcio-1.43.0-cp310-cp310-win32.whl", hash = "sha256:c36f418c925a41fccada8f7ae9a3d3e227bfa837ddbfddd3d8b0ac252d12dda9"}, + {file = "grpcio-1.43.0-cp310-cp310-win_amd64.whl", hash = "sha256:772b943f34374744f70236bbbe0afe413ed80f9ae6303503f85e2b421d4bca92"}, + {file = "grpcio-1.43.0-cp36-cp36m-linux_armv7l.whl", hash = "sha256:cbc9b83211d905859dcf234ad39d7193ff0f05bfc3269c364fb0d114ee71de59"}, + {file = "grpcio-1.43.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:fb7229fa2a201a0c377ff3283174ec966da8f9fd7ffcc9a92f162d2e7fc9025b"}, + {file = "grpcio-1.43.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:17b75f220ee6923338155b4fcef4c38802b9a57bc57d112c9599a13a03e99f8d"}, + {file = "grpcio-1.43.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:6620a5b751b099b3b25553cfc03dfcd873cda06f9bb2ff7e9948ac7090e20f05"}, + {file = "grpcio-1.43.0-cp36-cp36m-manylinux_2_17_aarch64.whl", hash = "sha256:1898f999383baac5fcdbdef8ea5b1ef204f38dc211014eb6977ac6e55944d738"}, + {file = "grpcio-1.43.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47b6821238d8978014d23b1132713dac6c2d72cbb561cf257608b1673894f90a"}, + {file = "grpcio-1.43.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80398e9fb598060fa41050d1220f5a2440fe74ff082c36dda41ac3215ebb5ddd"}, + {file = "grpcio-1.43.0-cp36-cp36m-win32.whl", hash = 
"sha256:0110310eff07bb69782f53b7a947490268c4645de559034c43c0a635612e250f"}, + {file = "grpcio-1.43.0-cp36-cp36m-win_amd64.whl", hash = "sha256:45401d00f2ee46bde75618bf33e9df960daa7980e6e0e7328047191918c98504"}, + {file = "grpcio-1.43.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:af78ac55933811e6a25141336b1f2d5e0659c2f568d44d20539b273792563ca7"}, + {file = "grpcio-1.43.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8b2b9dc4d7897566723b77422e11c009a0ebd397966b165b21b89a62891a9fdf"}, + {file = "grpcio-1.43.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:77ef653f966934b3bfdd00e4f2064b68880eb40cf09b0b99edfa5ee22a44f559"}, + {file = "grpcio-1.43.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:e95b5d62ec26d0cd0b90c202d73e7cb927c369c3358e027225239a4e354967dc"}, + {file = "grpcio-1.43.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:04239e8f71db832c26bbbedb4537b37550a39d77681d748ab4678e58dd6455d6"}, + {file = "grpcio-1.43.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b4a7152187a49767a47d1413edde2304c96f41f7bc92cc512e230dfd0fba095"}, + {file = "grpcio-1.43.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8cc936a29c65ab39714e1ba67a694c41218f98b6e2a64efb83f04d9abc4386b"}, + {file = "grpcio-1.43.0-cp37-cp37m-win32.whl", hash = "sha256:577e024c8dd5f27cd98ba850bc4e890f07d4b5942e5bc059a3d88843a2f48f66"}, + {file = "grpcio-1.43.0-cp37-cp37m-win_amd64.whl", hash = "sha256:138f57e3445d4a48d9a8a5af1538fdaafaa50a0a3c243f281d8df0edf221dc02"}, + {file = "grpcio-1.43.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:08cf25f2936629db062aeddbb594bd76b3383ab0ede75ef0461a3b0bc3a2c150"}, + {file = "grpcio-1.43.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:01f4b887ed703fe82ebe613e1d2dadea517891725e17e7a6134dcd00352bd28c"}, + {file = "grpcio-1.43.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:0aa8285f284338eb68962fe1a830291db06f366ea12f213399b520c062b01f65"}, + {file = "grpcio-1.43.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:0edbfeb6729aa9da33ce7e28fb7703b3754934115454ae45e8cc1db601756fd3"}, + {file = "grpcio-1.43.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:c354017819201053d65212befd1dcb65c2d91b704d8977e696bae79c47cd2f82"}, + {file = "grpcio-1.43.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50cfb7e1067ee5e00b8ab100a6b7ea322d37ec6672c0455106520b5891c4b5f5"}, + {file = "grpcio-1.43.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57f1aeb65ed17dfb2f6cd717cc109910fe395133af7257a9c729c0b9604eac10"}, + {file = "grpcio-1.43.0-cp38-cp38-win32.whl", hash = "sha256:fa26a8bbb3fe57845acb1329ff700d5c7eaf06414c3e15f4cb8923f3a466ef64"}, + {file = "grpcio-1.43.0-cp38-cp38-win_amd64.whl", hash = "sha256:ade8b79a6b6aea68adb9d4bfeba5d647667d842202c5d8f3ba37ac1dc8e5c09c"}, + {file = "grpcio-1.43.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:124e718faf96fe44c98b05f3f475076be8b5198bb4c52a13208acf88a8548ba9"}, + {file = "grpcio-1.43.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2f96142d0abc91290a63ba203f01649e498302b1b6007c67bad17f823ecde0cf"}, + {file = "grpcio-1.43.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:31e6e489ccd8f08884b9349a39610982df48535881ec34f05a11c6e6b6ebf9d0"}, + {file = "grpcio-1.43.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:0e731f660e1e68238f56f4ce11156f02fd06dc58bc7834778d42c0081d4ef5ad"}, + {file = "grpcio-1.43.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = 
"sha256:1f16725a320460435a8a5339d8b06c4e00d307ab5ad56746af2e22b5f9c50932"}, + {file = "grpcio-1.43.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4b4543e13acb4806917d883d0f70f21ba93b29672ea81f4aaba14821aaf9bb0"}, + {file = "grpcio-1.43.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:594aaa0469f4fca7773e80d8c27bf1298e7bbce5f6da0f084b07489a708f16ab"}, + {file = "grpcio-1.43.0-cp39-cp39-win32.whl", hash = "sha256:5449ae564349e7a738b8c38583c0aad954b0d5d1dd3cea68953bfc32eaee11e3"}, + {file = "grpcio-1.43.0-cp39-cp39-win_amd64.whl", hash = "sha256:bdf41550815a831384d21a498b20597417fd31bd084deb17d31ceb39ad9acc79"}, + {file = "grpcio-1.43.0.tar.gz", hash = "sha256:735d9a437c262ab039d02defddcb9f8f545d7009ae61c0114e19dda3843febe5"}, ] h11 = [ {file = "h11-0.12.0-py3-none-any.whl", hash = "sha256:36a3cb8c0a032f56e2da7084577878a035d3b61d104230d4bd49c0c6b555a9c6"}, @@ -4010,8 +4034,8 @@ idna = [ {file = "idna-2.10.tar.gz", hash = "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6"}, ] importlib-metadata = [ - {file = "importlib_metadata-4.8.2-py3-none-any.whl", hash = "sha256:53ccfd5c134223e497627b9815d5030edf77d2ed573922f7a0b8f8bb81a1c100"}, - {file = "importlib_metadata-4.8.2.tar.gz", hash = "sha256:75bdec14c397f528724c1bfd9709d660b33a4d2e77387a3358f20b848bb5e5fb"}, + {file = "importlib_metadata-4.10.0-py3-none-any.whl", hash = "sha256:b7cf7d3fef75f1e4c80a96ca660efbd51473d7e8f39b5ab9210febc7809012a4"}, + {file = "importlib_metadata-4.10.0.tar.gz", hash = "sha256:92a8b58ce734b2a4494878e0ecf7d79ccd7a128b5fc6014c401e0b61f006f0f6"}, ] incremental = [ {file = "incremental-21.3.0-py2.py3-none-any.whl", hash = "sha256:92014aebc6a20b78a8084cdd5645eeaa7f74b8933f70fa3ada2cfbd1e3b54321"}, @@ -4026,8 +4050,8 @@ ipaddress = [ {file = "ipaddress-1.0.23.tar.gz", hash = "sha256:b7f8e0369580bb4a24d5ba1d7cc29660a4a6987763faf1d8a8046830e020e7e2"}, ] isodate = [ - {file = "isodate-0.6.0-py2.py3-none-any.whl", hash = "sha256:aa4d33c06640f5352aca96e4b81afd8ab3b47337cc12089822d6f322ac772c81"}, - {file = "isodate-0.6.0.tar.gz", hash = "sha256:2e364a3d5759479cdb2d37cce6b9376ea504db2ff90252a2e5b7cc89cc9ff2d8"}, + {file = "isodate-0.6.1-py2.py3-none-any.whl", hash = "sha256:0751eece944162659049d35f4f549ed815792b38793f07cf73381c1c87cbed96"}, + {file = "isodate-0.6.1.tar.gz", hash = "sha256:48c5881de7e8b0a0d648cb024c8062dc84e7b840ed81e864c7614fd3c127bde9"}, ] jieba = [ {file = "jieba-0.42.1.tar.gz", hash = "sha256:055ca12f62674fafed09427f176506079bc135638a14e23e25be909131928db2"}, @@ -4238,8 +4262,8 @@ mongomock = [ {file = "mongomock-3.23.0.tar.gz", hash = "sha256:d9945e7c87c221aed47c6c10708376351a5f5ee48060943c56ba195be425b0dd"}, ] moto = [ - {file = "moto-2.2.17-py2.py3-none-any.whl", hash = "sha256:73aa14a650cb3bf02ca720b343618a57dda4c2c1d1166708a4c5c98ea9013b29"}, - {file = "moto-2.2.17.tar.gz", hash = "sha256:221ebd16b41b3ae157554ca5e540a8c1b4b1c93443cbf854c1f04751194c51b6"}, + {file = "moto-2.2.20-py2.py3-none-any.whl", hash = "sha256:20f06e51dbdeb3ed76989f733e4e51472cbfe39dda031a49da66fc8df1bcefa4"}, + {file = "moto-2.2.20.tar.gz", hash = "sha256:aa6479df35b47d4df0c3214e118c328df67d9f4e157ec5523ce87a23fecf93f6"}, ] msgpack = [ {file = "msgpack-1.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:96acc674bb9c9be63fa8b6dabc3248fdc575c4adc005c440ad02f87ca7edd079"}, @@ -4356,7 +4380,7 @@ multidict = [ {file = "multidict-5.2.0.tar.gz", hash = "sha256:0dd1c93edb444b33ba2274b66f63def8a327d607c6c790772f448a53b6ea59ce"}, ] murmurhash = [ - {file 
= "murmurhash-1.0.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a814d559afe2a97ad40accf21ce96e8b04a3ff5a08f80c02b7acd427dbb7d567"}, + {file = "murmurhash-1.0.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1431d817e1fff1ed35f8dc54dd5b4d70165ec98076de8aca351805f8037293f3"}, {file = "murmurhash-1.0.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c7b8cc4a8db1c821b80f8ca70a25c3166b14d68ecef8693a117c6a0b1d74ace"}, {file = "murmurhash-1.0.6-cp310-cp310-win_amd64.whl", hash = "sha256:e40790fdaf65213d70da4ed9229f16f6d6376310dc8fc23eacc98e6151c6ae7e"}, {file = "murmurhash-1.0.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a78d53f047c3410ce4c589d9b47090f628f844ed5694418144e63cfe7f3da7e9"}, @@ -4518,54 +4542,45 @@ pep440-version-utils = [ {file = "pep440_version_utils-0.3.0-py3-none-any.whl", hash = "sha256:73780b2c31adad5ca35c89eb008f51c2a47aee0318debe31391b673b90577e1b"}, ] pillow = [ - {file = "Pillow-8.4.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:81f8d5c81e483a9442d72d182e1fb6dcb9723f289a57e8030811bac9ea3fef8d"}, - {file = "Pillow-8.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3f97cfb1e5a392d75dd8b9fd274d205404729923840ca94ca45a0af57e13dbe6"}, - {file = "Pillow-8.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb9fc393f3c61f9054e1ed26e6fe912c7321af2f41ff49d3f83d05bacf22cc78"}, - {file = "Pillow-8.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d82cdb63100ef5eedb8391732375e6d05993b765f72cb34311fab92103314649"}, - {file = "Pillow-8.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62cc1afda735a8d109007164714e73771b499768b9bb5afcbbee9d0ff374b43f"}, - {file = "Pillow-8.4.0-cp310-cp310-win32.whl", hash = "sha256:e3dacecfbeec9a33e932f00c6cd7996e62f53ad46fbe677577394aaa90ee419a"}, - {file = "Pillow-8.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:620582db2a85b2df5f8a82ddeb52116560d7e5e6b055095f04ad828d1b0baa39"}, - {file = "Pillow-8.4.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:1bc723b434fbc4ab50bb68e11e93ce5fb69866ad621e3c2c9bdb0cd70e345f55"}, - {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72cbcfd54df6caf85cc35264c77ede902452d6df41166010262374155947460c"}, - {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70ad9e5c6cb9b8487280a02c0ad8a51581dcbbe8484ce058477692a27c151c0a"}, - {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25a49dc2e2f74e65efaa32b153527fc5ac98508d502fa46e74fa4fd678ed6645"}, - {file = "Pillow-8.4.0-cp36-cp36m-win32.whl", hash = "sha256:93ce9e955cc95959df98505e4608ad98281fff037350d8c2671c9aa86bcf10a9"}, - {file = "Pillow-8.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:2e4440b8f00f504ee4b53fe30f4e381aae30b0568193be305256b1462216feff"}, - {file = "Pillow-8.4.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8c803ac3c28bbc53763e6825746f05cc407b20e4a69d0122e526a582e3b5e153"}, - {file = "Pillow-8.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8a17b5d948f4ceeceb66384727dde11b240736fddeda54ca740b9b8b1556b29"}, - {file = "Pillow-8.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1394a6ad5abc838c5cd8a92c5a07535648cdf6d09e8e2d6df916dfa9ea86ead8"}, - {file = "Pillow-8.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:792e5c12376594bfcb986ebf3855aa4b7c225754e9a9521298e460e92fb4a488"}, - {file = "Pillow-8.4.0-cp37-cp37m-win32.whl", hash = "sha256:d99ec152570e4196772e7a8e4ba5320d2d27bf22fdf11743dd882936ed64305b"}, - {file = "Pillow-8.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:7b7017b61bbcdd7f6363aeceb881e23c46583739cb69a3ab39cb384f6ec82e5b"}, - {file = "Pillow-8.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:d89363f02658e253dbd171f7c3716a5d340a24ee82d38aab9183f7fdf0cdca49"}, - {file = "Pillow-8.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0a0956fdc5defc34462bb1c765ee88d933239f9a94bc37d132004775241a7585"}, - {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b7bb9de00197fb4261825c15551adf7605cf14a80badf1761d61e59da347779"}, - {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72b9e656e340447f827885b8d7a15fc8c4e68d410dc2297ef6787eec0f0ea409"}, - {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5a4532a12314149d8b4e4ad8ff09dde7427731fcfa5917ff16d0291f13609df"}, - {file = "Pillow-8.4.0-cp38-cp38-win32.whl", hash = "sha256:82aafa8d5eb68c8463b6e9baeb4f19043bb31fefc03eb7b216b51e6a9981ae09"}, - {file = "Pillow-8.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:066f3999cb3b070a95c3652712cffa1a748cd02d60ad7b4e485c3748a04d9d76"}, - {file = "Pillow-8.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:5503c86916d27c2e101b7f71c2ae2cddba01a2cf55b8395b0255fd33fa4d1f1a"}, - {file = "Pillow-8.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4acc0985ddf39d1bc969a9220b51d94ed51695d455c228d8ac29fcdb25810e6e"}, - {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b052a619a8bfcf26bd8b3f48f45283f9e977890263e4571f2393ed8898d331b"}, - {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:493cb4e415f44cd601fcec11c99836f707bb714ab03f5ed46ac25713baf0ff20"}, - {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8831cb7332eda5dc89b21a7bce7ef6ad305548820595033a4b03cf3091235ed"}, - {file = "Pillow-8.4.0-cp39-cp39-win32.whl", hash = "sha256:5e9ac5f66616b87d4da618a20ab0a38324dbe88d8a39b55be8964eb520021e02"}, - {file = "Pillow-8.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:3eb1ce5f65908556c2d8685a8f0a6e989d887ec4057326f6c22b24e8a172c66b"}, - {file = "Pillow-8.4.0-pp36-pypy36_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ddc4d832a0f0b4c52fff973a0d44b6c99839a9d016fe4e6a1cb8f3eea96479c2"}, - {file = "Pillow-8.4.0-pp36-pypy36_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a3e5ddc44c14042f0844b8cf7d2cd455f6cc80fd7f5eefbe657292cf601d9ad"}, - {file = "Pillow-8.4.0-pp36-pypy36_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c70e94281588ef053ae8998039610dbd71bc509e4acbc77ab59d7d2937b10698"}, - {file = "Pillow-8.4.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:3862b7256046fcd950618ed22d1d60b842e3a40a48236a5498746f21189afbbc"}, - {file = "Pillow-8.4.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4901622493f88b1a29bd30ec1a2f683782e57c3c16a2dbc7f2595ba01f639df"}, - {file = "Pillow-8.4.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84c471a734240653a0ec91dec0996696eea227eafe72a33bd06c92697728046b"}, - {file = "Pillow-8.4.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:244cf3b97802c34c41905d22810846802a3329ddcb93ccc432870243211c79fc"}, - 
{file = "Pillow-8.4.0.tar.gz", hash = "sha256:b8e2f83c56e141920c39464b852de3719dfbfb6e3c99a2d8da0edf4fb33176ed"}, + {file = "Pillow-9.0.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:113723312215b25c22df1fdf0e2da7a3b9c357a7d24a93ebbe80bfda4f37a8d4"}, + {file = "Pillow-9.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bb47a548cea95b86494a26c89d153fd31122ed65255db5dcbc421a2d28eb3379"}, + {file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31b265496e603985fad54d52d11970383e317d11e18e856971bdbb86af7242a4"}, + {file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d154ed971a4cc04b93a6d5b47f37948d1f621f25de3e8fa0c26b2d44f24e3e8f"}, + {file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80fe92813d208ce8aa7d76da878bdc84b90809f79ccbad2a288e9bcbeac1d9bd"}, + {file = "Pillow-9.0.0-cp310-cp310-win32.whl", hash = "sha256:d5dcea1387331c905405b09cdbfb34611050cc52c865d71f2362f354faee1e9f"}, + {file = "Pillow-9.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:52abae4c96b5da630a8b4247de5428f593465291e5b239f3f843a911a3cf0105"}, + {file = "Pillow-9.0.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:72c3110228944019e5f27232296c5923398496b28be42535e3b2dc7297b6e8b6"}, + {file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97b6d21771da41497b81652d44191489296555b761684f82b7b544c49989110f"}, + {file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72f649d93d4cc4d8cf79c91ebc25137c358718ad75f99e99e043325ea7d56100"}, + {file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7aaf07085c756f6cb1c692ee0d5a86c531703b6e8c9cae581b31b562c16b98ce"}, + {file = "Pillow-9.0.0-cp37-cp37m-win32.whl", hash = "sha256:03b27b197deb4ee400ed57d8d4e572d2d8d80f825b6634daf6e2c18c3c6ccfa6"}, + {file = "Pillow-9.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a09a9d4ec2b7887f7a088bbaacfd5c07160e746e3d47ec5e8050ae3b2a229e9f"}, + {file = "Pillow-9.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:490e52e99224858f154975db61c060686df8a6b3f0212a678e5d2e2ce24675c9"}, + {file = "Pillow-9.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:500d397ddf4bbf2ca42e198399ac13e7841956c72645513e8ddf243b31ad2128"}, + {file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ebd8b9137630a7bbbff8c4b31e774ff05bbb90f7911d93ea2c9371e41039b52"}, + {file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd0e5062f11cb3e730450a7d9f323f4051b532781026395c4323b8ad055523c4"}, + {file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f3b4522148586d35e78313db4db0df4b759ddd7649ef70002b6c3767d0fdeb7"}, + {file = "Pillow-9.0.0-cp38-cp38-win32.whl", hash = "sha256:0b281fcadbb688607ea6ece7649c5d59d4bbd574e90db6cd030e9e85bde9fecc"}, + {file = "Pillow-9.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:b5050d681bcf5c9f2570b93bee5d3ec8ae4cf23158812f91ed57f7126df91762"}, + {file = "Pillow-9.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:c2067b3bb0781f14059b112c9da5a91c80a600a97915b4f48b37f197895dd925"}, + {file = "Pillow-9.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2d16b6196fb7a54aff6b5e3ecd00f7c0bab1b56eee39214b2b223a9d938c50af"}, + {file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:98cb63ca63cb61f594511c06218ab4394bf80388b3d66cd61d0b1f63ee0ea69f"}, + {file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc462d24500ba707e9cbdef436c16e5c8cbf29908278af053008d9f689f56dee"}, + {file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3586e12d874ce2f1bc875a3ffba98732ebb12e18fb6d97be482bd62b56803281"}, + {file = "Pillow-9.0.0-cp39-cp39-win32.whl", hash = "sha256:68e06f8b2248f6dc8b899c3e7ecf02c9f413aab622f4d6190df53a78b93d97a5"}, + {file = "Pillow-9.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:6579f9ba84a3d4f1807c4aab4be06f373017fc65fff43498885ac50a9b47a553"}, + {file = "Pillow-9.0.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:47f5cf60bcb9fbc46011f75c9b45a8b5ad077ca352a78185bd3e7f1d294b98bb"}, + {file = "Pillow-9.0.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fd8053e1f8ff1844419842fd474fc359676b2e2a2b66b11cc59f4fa0a301315"}, + {file = "Pillow-9.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c5439bfb35a89cac50e81c751317faea647b9a3ec11c039900cd6915831064d"}, + {file = "Pillow-9.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:95545137fc56ce8c10de646074d242001a112a92de169986abd8c88c27566a05"}, + {file = "Pillow-9.0.0.tar.gz", hash = "sha256:ee6e2963e92762923956fe5d3479b1fdc3b76c83f290aad131a2f98c3df0593e"}, ] pluggy = [ {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, ] preshed = [ - {file = "preshed-3.0.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a9683730127658b531120b4ed5cff1f2a567318ab75e9ab0f22cc84ae1486c23"}, + {file = "preshed-3.0.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:66a71ced487516cf81fd0431a3a843514262ae2f33e9a7688b87562258fa75d5"}, {file = "preshed-3.0.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c98f725d8478f3ade4ab1ea00f50a92d2d9406d37276bc46fd8bab1d47452c4"}, {file = "preshed-3.0.6-cp310-cp310-win_amd64.whl", hash = "sha256:ea8aa9610837e907e8442e79300df0a861bfdb4dcaf026a5d9642a688ad04815"}, {file = "preshed-3.0.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e03ae3eee961106a517fcd827b5a7c51f7317236b3e665c989054ab8dc381d28"}, @@ -4588,98 +4603,124 @@ prompt-toolkit = [ {file = "prompt_toolkit-2.0.10.tar.gz", hash = "sha256:f15af68f66e664eaa559d4ac8a928111eebd5feda0c11738b5998045224829db"}, ] protobuf = [ - {file = "protobuf-3.19.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d80f80eb175bf5f1169139c2e0c5ada98b1c098e2b3c3736667f28cbbea39fc8"}, - {file = "protobuf-3.19.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:a529e7df52204565bcd33738a7a5f288f3d2d37d86caa5d78c458fa5fabbd54d"}, - {file = "protobuf-3.19.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28ccea56d4dc38d35cd70c43c2da2f40ac0be0a355ef882242e8586c6d66666f"}, - {file = "protobuf-3.19.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:8b30a7de128c46b5ecb343917d9fa737612a6e8280f440874e5cc2ba0d79b8f6"}, - {file = "protobuf-3.19.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5935c8ce02e3d89c7900140a8a42b35bc037ec07a6aeb61cc108be8d3c9438a6"}, - {file = "protobuf-3.19.1-cp36-cp36m-win32.whl", hash = 
"sha256:74f33edeb4f3b7ed13d567881da8e5a92a72b36495d57d696c2ea1ae0cfee80c"}, - {file = "protobuf-3.19.1-cp36-cp36m-win_amd64.whl", hash = "sha256:038daf4fa38a7e818dd61f51f22588d61755160a98db087a046f80d66b855942"}, - {file = "protobuf-3.19.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e51561d72efd5bd5c91490af1f13e32bcba8dab4643761eb7de3ce18e64a853"}, - {file = "protobuf-3.19.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:6e8ea9173403219239cdfd8d946ed101f2ab6ecc025b0fda0c6c713c35c9981d"}, - {file = "protobuf-3.19.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db3532d9f7a6ebbe2392041350437953b6d7a792de10e629c1e4f5a6b1fe1ac6"}, - {file = "protobuf-3.19.1-cp37-cp37m-win32.whl", hash = "sha256:615b426a177780ce381ecd212edc1e0f70db8557ed72560b82096bd36b01bc04"}, - {file = "protobuf-3.19.1-cp37-cp37m-win_amd64.whl", hash = "sha256:d8919368410110633717c406ab5c97e8df5ce93020cfcf3012834f28b1fab1ea"}, - {file = "protobuf-3.19.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:71b0250b0cfb738442d60cab68abc166de43411f2a4f791d31378590bfb71bd7"}, - {file = "protobuf-3.19.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:3cd0458870ea7d1c58e948ac8078f6ba8a7ecc44a57e03032ed066c5bb318089"}, - {file = "protobuf-3.19.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:655264ed0d0efe47a523e2255fc1106a22f6faab7cc46cfe99b5bae085c2a13e"}, - {file = "protobuf-3.19.1-cp38-cp38-win32.whl", hash = "sha256:b691d996c6d0984947c4cf8b7ae2fe372d99b32821d0584f0b90277aa36982d3"}, - {file = "protobuf-3.19.1-cp38-cp38-win_amd64.whl", hash = "sha256:e7e8d2c20921f8da0dea277dfefc6abac05903ceac8e72839b2da519db69206b"}, - {file = "protobuf-3.19.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fd390367fc211cc0ffcf3a9e149dfeca78fecc62adb911371db0cec5c8b7472d"}, - {file = "protobuf-3.19.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:d83e1ef8cb74009bebee3e61cc84b1c9cd04935b72bca0cbc83217d140424995"}, - {file = "protobuf-3.19.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36d90676d6f426718463fe382ec6274909337ca6319d375eebd2044e6c6ac560"}, - {file = "protobuf-3.19.1-cp39-cp39-win32.whl", hash = "sha256:e7b24c11df36ee8e0c085e5b0dc560289e4b58804746fb487287dda51410f1e2"}, - {file = "protobuf-3.19.1-cp39-cp39-win_amd64.whl", hash = "sha256:77d2fadcf369b3f22859ab25bd12bb8e98fb11e05d9ff9b7cd45b711c719c002"}, - {file = "protobuf-3.19.1-py2.py3-none-any.whl", hash = "sha256:e813b1c9006b6399308e917ac5d298f345d95bb31f46f02b60cd92970a9afa17"}, - {file = "protobuf-3.19.1.tar.gz", hash = "sha256:62a8e4baa9cb9e064eb62d1002eca820857ab2138440cb4b3ea4243830f94ca7"}, + {file = "protobuf-3.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1cb2ed66aac593adbf6dca4f07cd7ee7e2958b17bbc85b2cc8bc564ebeb258ec"}, + {file = "protobuf-3.19.3-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:898bda9cd37ec0c781b598891e86435de80c3bfa53eb483a9dac5a11ec93e942"}, + {file = "protobuf-3.19.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ad761ef3be34c8bdc7285bec4b40372a8dad9e70cfbdc1793cd3cf4c1a4ce74"}, + {file = "protobuf-3.19.3-cp310-cp310-win32.whl", hash = "sha256:2cddcbcc222f3144765ccccdb35d3621dc1544da57a9aca7e1944c1a4fe3db11"}, + {file = "protobuf-3.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:6202df8ee8457cb00810c6e76ced480f22a1e4e02c899a14e7b6e6e1de09f938"}, + {file = "protobuf-3.19.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:397d82f1c58b76445469c8c06b8dee1ff67b3053639d054f52599a458fac9bc6"}, + 
{file = "protobuf-3.19.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e54b8650e849ee8e95e481024bff92cf98f5ec61c7650cb838d928a140adcb63"}, + {file = "protobuf-3.19.3-cp36-cp36m-win32.whl", hash = "sha256:3bf3a07d17ba3511fe5fa916afb7351f482ab5dbab5afe71a7a384274a2cd550"}, + {file = "protobuf-3.19.3-cp36-cp36m-win_amd64.whl", hash = "sha256:afa8122de8064fd577f49ae9eef433561c8ace97a0a7b969d56e8b1d39b5d177"}, + {file = "protobuf-3.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:18c40a1b8721026a85187640f1786d52407dc9c1ba8ec38accb57a46e84015f6"}, + {file = "protobuf-3.19.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:af7238849fa79285d448a24db686517570099739527a03c9c2971cce99cc5ae2"}, + {file = "protobuf-3.19.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e765e6dfbbb02c55e4d6d1145743401a84fc0b508f5a81b2c5a738cf86353139"}, + {file = "protobuf-3.19.3-cp37-cp37m-win32.whl", hash = "sha256:c781402ed5396ab56358d7b866d78c03a77cbc26ba0598d8bb0ac32084b1a257"}, + {file = "protobuf-3.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:544fe9705189b249380fae07952d220c97f5c6c9372a6f936cc83a79601dcb70"}, + {file = "protobuf-3.19.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:84bf3aa3efb00dbe1c7ed55da0f20800b0662541e582d7e62b3e1464d61ed365"}, + {file = "protobuf-3.19.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:3f80a3491eaca767cdd86cb8660dc778f634b44abdb0dffc9b2a8e8d0cd617d0"}, + {file = "protobuf-3.19.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9401d96552befcc7311f5ef8f0fa7dba0ef5fd805466b158b141606cd0ab6a8"}, + {file = "protobuf-3.19.3-cp38-cp38-win32.whl", hash = "sha256:ef02d112c025e83db5d1188a847e358beab3e4bbfbbaf10eaf69e67359af51b2"}, + {file = "protobuf-3.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:1291a0a7db7d792745c99d4657b4c5c4942695c8b1ac1bfb993a34035ec123f7"}, + {file = "protobuf-3.19.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:49677e5e9c7ea1245a90c2e8a00d304598f22ea3aa0628f0e0a530a9e70665fa"}, + {file = "protobuf-3.19.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:df2ba379ee42427e8fcc6a0a76843bff6efb34ef5266b17f95043939b5e25b69"}, + {file = "protobuf-3.19.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2acd7ca329be544d1a603d5f13a4e34a3791c90d651ebaf130ba2e43ae5397c6"}, + {file = "protobuf-3.19.3-cp39-cp39-win32.whl", hash = "sha256:b53519b2ebec70cfe24b4ddda21e9843f0918d7c3627a785393fb35d402ab8ad"}, + {file = "protobuf-3.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:8ceaf5fdb72c8e1fcb7be9f2b3b07482ce058a3548180c0bdd5c7e4ac5e14165"}, + {file = "protobuf-3.19.3-py2.py3-none-any.whl", hash = "sha256:f6d4b5b7595a57e69eb7314c67bef4a3c745b4caf91accaf72913d8e0635111b"}, + {file = "protobuf-3.19.3.tar.gz", hash = "sha256:d975a6314fbf5c524d4981e24294739216b5fb81ef3c14b86fb4b045d6690907"}, ] psutil = [ - {file = "psutil-5.8.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:0066a82f7b1b37d334e68697faba68e5ad5e858279fd6351c8ca6024e8d6ba64"}, - {file = "psutil-5.8.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:0ae6f386d8d297177fd288be6e8d1afc05966878704dad9847719650e44fc49c"}, - {file = "psutil-5.8.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:12d844996d6c2b1d3881cfa6fa201fd635971869a9da945cf6756105af73d2df"}, - {file = "psutil-5.8.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:02b8292609b1f7fcb34173b25e48d0da8667bc85f81d7476584d889c6e0f2131"}, - {file = "psutil-5.8.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = 
"sha256:6ffe81843131ee0ffa02c317186ed1e759a145267d54fdef1bc4ea5f5931ab60"}, - {file = "psutil-5.8.0-cp27-none-win32.whl", hash = "sha256:ea313bb02e5e25224e518e4352af4bf5e062755160f77e4b1767dd5ccb65f876"}, - {file = "psutil-5.8.0-cp27-none-win_amd64.whl", hash = "sha256:5da29e394bdedd9144c7331192e20c1f79283fb03b06e6abd3a8ae45ffecee65"}, - {file = "psutil-5.8.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:74fb2557d1430fff18ff0d72613c5ca30c45cdbfcddd6a5773e9fc1fe9364be8"}, - {file = "psutil-5.8.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:74f2d0be88db96ada78756cb3a3e1b107ce8ab79f65aa885f76d7664e56928f6"}, - {file = "psutil-5.8.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:99de3e8739258b3c3e8669cb9757c9a861b2a25ad0955f8e53ac662d66de61ac"}, - {file = "psutil-5.8.0-cp36-cp36m-win32.whl", hash = "sha256:36b3b6c9e2a34b7d7fbae330a85bf72c30b1c827a4366a07443fc4b6270449e2"}, - {file = "psutil-5.8.0-cp36-cp36m-win_amd64.whl", hash = "sha256:52de075468cd394ac98c66f9ca33b2f54ae1d9bff1ef6b67a212ee8f639ec06d"}, - {file = "psutil-5.8.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c6a5fd10ce6b6344e616cf01cc5b849fa8103fbb5ba507b6b2dee4c11e84c935"}, - {file = "psutil-5.8.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:61f05864b42fedc0771d6d8e49c35f07efd209ade09a5afe6a5059e7bb7bf83d"}, - {file = "psutil-5.8.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:0dd4465a039d343925cdc29023bb6960ccf4e74a65ad53e768403746a9207023"}, - {file = "psutil-5.8.0-cp37-cp37m-win32.whl", hash = "sha256:1bff0d07e76114ec24ee32e7f7f8d0c4b0514b3fae93e3d2aaafd65d22502394"}, - {file = "psutil-5.8.0-cp37-cp37m-win_amd64.whl", hash = "sha256:fcc01e900c1d7bee2a37e5d6e4f9194760a93597c97fee89c4ae51701de03563"}, - {file = "psutil-5.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6223d07a1ae93f86451d0198a0c361032c4c93ebd4bf6d25e2fb3edfad9571ef"}, - {file = "psutil-5.8.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:d225cd8319aa1d3c85bf195c4e07d17d3cd68636b8fc97e6cf198f782f99af28"}, - {file = "psutil-5.8.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:28ff7c95293ae74bf1ca1a79e8805fcde005c18a122ca983abf676ea3466362b"}, - {file = "psutil-5.8.0-cp38-cp38-win32.whl", hash = "sha256:ce8b867423291cb65cfc6d9c4955ee9bfc1e21fe03bb50e177f2b957f1c2469d"}, - {file = "psutil-5.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:90f31c34d25b1b3ed6c40cdd34ff122b1887a825297c017e4cbd6796dd8b672d"}, - {file = "psutil-5.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6323d5d845c2785efb20aded4726636546b26d3b577aded22492908f7c1bdda7"}, - {file = "psutil-5.8.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:245b5509968ac0bd179287d91210cd3f37add77dad385ef238b275bad35fa1c4"}, - {file = "psutil-5.8.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:90d4091c2d30ddd0a03e0b97e6a33a48628469b99585e2ad6bf21f17423b112b"}, - {file = "psutil-5.8.0-cp39-cp39-win32.whl", hash = "sha256:ea372bcc129394485824ae3e3ddabe67dc0b118d262c568b4d2602a7070afdb0"}, - {file = "psutil-5.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:f4634b033faf0d968bb9220dd1c793b897ab7f1189956e1aa9eae752527127d3"}, - {file = "psutil-5.8.0.tar.gz", hash = "sha256:0c9ccb99ab76025f2f0bbecf341d4656e9c1351db8cc8a03ccd62e318ab4b5c6"}, + {file = "psutil-5.9.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:55ce319452e3d139e25d6c3f85a1acf12d1607ddedea5e35fb47a552c051161b"}, + {file = "psutil-5.9.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:7336292a13a80eb93c21f36bde4328aa748a04b68c13d01dfddd67fc13fd0618"}, + {file = 
"psutil-5.9.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:cb8d10461c1ceee0c25a64f2dd54872b70b89c26419e147a05a10b753ad36ec2"}, + {file = "psutil-5.9.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:7641300de73e4909e5d148e90cc3142fb890079e1525a840cf0dfd39195239fd"}, + {file = "psutil-5.9.0-cp27-none-win32.whl", hash = "sha256:ea42d747c5f71b5ccaa6897b216a7dadb9f52c72a0fe2b872ef7d3e1eacf3ba3"}, + {file = "psutil-5.9.0-cp27-none-win_amd64.whl", hash = "sha256:ef216cc9feb60634bda2f341a9559ac594e2eeaadd0ba187a4c2eb5b5d40b91c"}, + {file = "psutil-5.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90a58b9fcae2dbfe4ba852b57bd4a1dded6b990a33d6428c7614b7d48eccb492"}, + {file = "psutil-5.9.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff0d41f8b3e9ebb6b6110057e40019a432e96aae2008951121ba4e56040b84f3"}, + {file = "psutil-5.9.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:742c34fff804f34f62659279ed5c5b723bb0195e9d7bd9907591de9f8f6558e2"}, + {file = "psutil-5.9.0-cp310-cp310-win32.whl", hash = "sha256:8293942e4ce0c5689821f65ce6522ce4786d02af57f13c0195b40e1edb1db61d"}, + {file = "psutil-5.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:9b51917c1af3fa35a3f2dabd7ba96a2a4f19df3dec911da73875e1edaf22a40b"}, + {file = "psutil-5.9.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e9805fed4f2a81de98ae5fe38b75a74c6e6ad2df8a5c479594c7629a1fe35f56"}, + {file = "psutil-5.9.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c51f1af02334e4b516ec221ee26b8fdf105032418ca5a5ab9737e8c87dafe203"}, + {file = "psutil-5.9.0-cp36-cp36m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32acf55cb9a8cbfb29167cd005951df81b567099295291bcfd1027365b36591d"}, + {file = "psutil-5.9.0-cp36-cp36m-win32.whl", hash = "sha256:e5c783d0b1ad6ca8a5d3e7b680468c9c926b804be83a3a8e95141b05c39c9f64"}, + {file = "psutil-5.9.0-cp36-cp36m-win_amd64.whl", hash = "sha256:d62a2796e08dd024b8179bd441cb714e0f81226c352c802fca0fd3f89eeacd94"}, + {file = "psutil-5.9.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3d00a664e31921009a84367266b35ba0aac04a2a6cad09c550a89041034d19a0"}, + {file = "psutil-5.9.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7779be4025c540d1d65a2de3f30caeacc49ae7a2152108adeaf42c7534a115ce"}, + {file = "psutil-5.9.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:072664401ae6e7c1bfb878c65d7282d4b4391f1bc9a56d5e03b5a490403271b5"}, + {file = "psutil-5.9.0-cp37-cp37m-win32.whl", hash = "sha256:df2c8bd48fb83a8408c8390b143c6a6fa10cb1a674ca664954de193fdcab36a9"}, + {file = "psutil-5.9.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1d7b433519b9a38192dfda962dd8f44446668c009833e1429a52424624f408b4"}, + {file = "psutil-5.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c3400cae15bdb449d518545cbd5b649117de54e3596ded84aacabfbb3297ead2"}, + {file = "psutil-5.9.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b2237f35c4bbae932ee98902a08050a27821f8f6dfa880a47195e5993af4702d"}, + {file = "psutil-5.9.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1070a9b287846a21a5d572d6dddd369517510b68710fca56b0e9e02fd24bed9a"}, + {file = "psutil-5.9.0-cp38-cp38-win32.whl", hash = "sha256:76cebf84aac1d6da5b63df11fe0d377b46b7b500d892284068bacccf12f20666"}, + {file = "psutil-5.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:3151a58f0fbd8942ba94f7c31c7e6b310d2989f4da74fcbf28b934374e9bf841"}, + {file = "psutil-5.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:539e429da49c5d27d5a58e3563886057f8fc3868a5547b4f1876d9c0f007bccf"}, + {file = "psutil-5.9.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58c7d923dc209225600aec73aa2c4ae8ea33b1ab31bc11ef8a5933b027476f07"}, + {file = "psutil-5.9.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3611e87eea393f779a35b192b46a164b1d01167c9d323dda9b1e527ea69d697d"}, + {file = "psutil-5.9.0-cp39-cp39-win32.whl", hash = "sha256:4e2fb92e3aeae3ec3b7b66c528981fd327fb93fd906a77215200404444ec1845"}, + {file = "psutil-5.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:7d190ee2eaef7831163f254dc58f6d2e2a22e27382b936aab51c835fc080c3d3"}, + {file = "psutil-5.9.0.tar.gz", hash = "sha256:869842dbd66bb80c3217158e629d6fceaecc3a3166d3d1faee515b05dd26ca25"}, ] psycopg2-binary = [ - {file = "psycopg2-binary-2.9.2.tar.gz", hash = "sha256:234b1f48488b2f86aac04fb00cb04e5e9bcb960f34fa8a8e41b73149d581a93b"}, - {file = "psycopg2_binary-2.9.2-cp310-cp310-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:c0e1fb7097ded2cc44d9037cfc68ad86a30341261492e7de95d180e534969fb2"}, - {file = "psycopg2_binary-2.9.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:717525cdc97b23182ff6f470fb5bf6f0bc796b5a7000c6f6699d6679991e4a5e"}, - {file = "psycopg2_binary-2.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3865d0cd919349c45603bd7e80249a382c5ecf8106304cfd153282adf9684b6a"}, - {file = "psycopg2_binary-2.9.2-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:daf6b5c62eb738872d61a1fa740d7768904911ba5a7e055ed72169d379b58beb"}, - {file = "psycopg2_binary-2.9.2-cp310-cp310-manylinux_2_24_ppc64le.whl", hash = "sha256:3ac83656ff4fbe7f2a956ab085e3eb1d678df54759965d509bdd6a06ce520d49"}, - {file = "psycopg2_binary-2.9.2-cp310-cp310-win32.whl", hash = "sha256:a04cfa231e7d9b63639e62166a4051cb47ca599fa341463fa3e1c48585fcee64"}, - {file = "psycopg2_binary-2.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:c6e16e085fe6dc6c099ee0be56657aa9ad71027465ef9591d302ba230c404c7e"}, - {file = "psycopg2_binary-2.9.2-cp36-cp36m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:53912199abb626a7249c662e72b70b4f57bf37f840599cec68625171435790dd"}, - {file = "psycopg2_binary-2.9.2-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:029e09a892b9ebc3c77851f69ce0720e1b72a9c6850460cee49b14dfbf9ccdd2"}, - {file = "psycopg2_binary-2.9.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db1b03c189f85b8df29030ad32d521dd7dcb862fd5f8892035314f5b886e70ce"}, - {file = "psycopg2_binary-2.9.2-cp36-cp36m-manylinux_2_24_aarch64.whl", hash = "sha256:2eecbdc5fa5886f2dd6cc673ce4291cc0fb8900965315268960ad9c2477f8276"}, - {file = "psycopg2_binary-2.9.2-cp36-cp36m-manylinux_2_24_ppc64le.whl", hash = "sha256:a77e98c68b0e6c51d4d6a994d22b30e77276cbd33e4aabdde03b9ad3a2c148aa"}, - {file = "psycopg2_binary-2.9.2-cp36-cp36m-win32.whl", hash = 
"sha256:bf31e6fdb4ec1f6d98a07f48836508ed6edd19b48b13bbf168fbc1bd014b4ca2"}, - {file = "psycopg2_binary-2.9.2-cp36-cp36m-win_amd64.whl", hash = "sha256:f9c37ecb173d76cf49e519133fd70851b8f9c38b6b8c1cb7fcfc71368d4cc6fc"}, - {file = "psycopg2_binary-2.9.2-cp37-cp37m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:a507db7758953b1b170c4310691a1a89877029b1e11b08ba5fc8ae3ddb35596b"}, - {file = "psycopg2_binary-2.9.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e4bbcfb403221ea1953f3e0a85cef00ed15c1683a66cf35c956a7e37c33a4c4"}, - {file = "psycopg2_binary-2.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4dff0f15af6936c6fe6da7067b4216edbbe076ad8625da819cc066591b1133c"}, - {file = "psycopg2_binary-2.9.2-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:8d2aafe46eb87742425ece38130510fbb035787ee89a329af299029c4d9ae318"}, - {file = "psycopg2_binary-2.9.2-cp37-cp37m-manylinux_2_24_ppc64le.whl", hash = "sha256:37c8f00f7a2860bac9f7a54f03c243fc1dd9b367e5b2b52f5a02e5f4e9d8c49b"}, - {file = "psycopg2_binary-2.9.2-cp37-cp37m-win32.whl", hash = "sha256:ef97578fab5115e3af4334dd3376dea3c3a79328a3314b21ec7ced02920b916d"}, - {file = "psycopg2_binary-2.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7e6bd4f532c2cd297b81114526176b240109a1c52020adca69c3f3226c65dc18"}, - {file = "psycopg2_binary-2.9.2-cp38-cp38-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:eeee7b18c51d02e49bf1984d7af26e8843fe68e31fa1cbab5366ebdfa1c89ade"}, - {file = "psycopg2_binary-2.9.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:497372cc76e6cbce2f51b37be141f360a321423c03eb9be45524b1d123f4cd11"}, - {file = "psycopg2_binary-2.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5671699aff57d22a245b7f4bba89e3de97dc841c5e98bd7f685429b2b20eca47"}, - {file = "psycopg2_binary-2.9.2-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:b9d45374ba98c1184df9cce93a0b766097544f8bdfcd5de83ff10f939c193125"}, - {file = "psycopg2_binary-2.9.2-cp38-cp38-manylinux_2_24_ppc64le.whl", hash = "sha256:a1852c5bef7e5f52bd43fde5eda610d4df0fb2efc31028150933e84b4140d47a"}, - {file = "psycopg2_binary-2.9.2-cp38-cp38-win32.whl", hash = "sha256:108b0380969ddab7c8ef2a813a57f87b308b2f88ec15f1a1e7b653964a3cfb25"}, - {file = "psycopg2_binary-2.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:14427437117f38e65f71db65d8eafd0e86837be456567798712b8da89db2b2dd"}, - {file = "psycopg2_binary-2.9.2-cp39-cp39-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:578c279cd1ce04f05ae0912530ece00bab92854911808e5aec27588aba87e361"}, - {file = "psycopg2_binary-2.9.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c2dea4deac3dd3687e32daeb0712ee96c535970dfdded37a11de6a21145ab0e"}, - {file = "psycopg2_binary-2.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b592f09ff18cfcc9037b9a976fcd62db48cae9dbd5385f2471d4c2ba40c52b4d"}, - {file = "psycopg2_binary-2.9.2-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:3a320e7a804f3886a599fea507364aaafbb8387027fffcdfbd34d96316c806c7"}, - {file = "psycopg2_binary-2.9.2-cp39-cp39-manylinux_2_24_ppc64le.whl", hash = "sha256:7585ca73dcfe326f31fafa8f96e6bb98ea9e9e46c7a1924ec8101d797914ae27"}, - {file = "psycopg2_binary-2.9.2-cp39-cp39-win32.whl", hash = 
"sha256:9c0aaad07941419926b9bd00171e49fe6b06e42e5527fb91671e137fe6c93d77"}, - {file = "psycopg2_binary-2.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:aa2847d8073951dbc84c4f8b32c620764db3c2eb0d99a04835fecfab7d04816e"}, + {file = "psycopg2-binary-2.9.3.tar.gz", hash = "sha256:761df5313dc15da1502b21453642d7599d26be88bff659382f8f9747c7ebea4e"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:539b28661b71da7c0e428692438efbcd048ca21ea81af618d845e06ebfd29478"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e82d38390a03da28c7985b394ec3f56873174e2c88130e6966cb1c946508e65"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57804fc02ca3ce0dbfbef35c4b3a4a774da66d66ea20f4bda601294ad2ea6092"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:083a55275f09a62b8ca4902dd11f4b33075b743cf0d360419e2051a8a5d5ff76"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_24_ppc64le.whl", hash = "sha256:0a29729145aaaf1ad8bafe663131890e2111f13416b60e460dae0a96af5905c9"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a79d622f5206d695d7824cbf609a4f5b88ea6d6dab5f7c147fc6d333a8787e4"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:090f3348c0ab2cceb6dfbe6bf721ef61262ddf518cd6cc6ecc7d334996d64efa"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:a9e1f75f96ea388fbcef36c70640c4efbe4650658f3d6a2967b4cc70e907352e"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c3ae8e75eb7160851e59adc77b3a19a976e50622e44fd4fd47b8b18208189d42"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-win32.whl", hash = "sha256:7b1e9b80afca7b7a386ef087db614faebbf8839b7f4db5eb107d0f1a53225029"}, + {file = "psycopg2_binary-2.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:8b344adbb9a862de0c635f4f0425b7958bf5a4b927c8594e6e8d261775796d53"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:e847774f8ffd5b398a75bc1c18fbb56564cda3d629fe68fd81971fece2d3c67e"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:68641a34023d306be959101b345732360fc2ea4938982309b786f7be1b43a4a1"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3303f8807f342641851578ee7ed1f3efc9802d00a6f83c101d21c608cb864460"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-manylinux_2_24_aarch64.whl", hash = "sha256:e3699852e22aa68c10de06524a3721ade969abf382da95884e6a10ff798f9281"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-manylinux_2_24_ppc64le.whl", hash = "sha256:526ea0378246d9b080148f2d6681229f4b5964543c170dd10bf4faaab6e0d27f"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:b1c8068513f5b158cf7e29c43a77eb34b407db29aca749d3eb9293ee0d3103ca"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:15803fa813ea05bef089fa78835118b5434204f3a17cb9f1e5dbfd0b9deea5af"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:152f09f57417b831418304c7f30d727dc83a12761627bb826951692cc6491e57"}, + {file = 
"psycopg2_binary-2.9.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:404224e5fef3b193f892abdbf8961ce20e0b6642886cfe1fe1923f41aaa75c9d"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-win32.whl", hash = "sha256:1f6b813106a3abdf7b03640d36e24669234120c72e91d5cbaeb87c5f7c36c65b"}, + {file = "psycopg2_binary-2.9.3-cp36-cp36m-win_amd64.whl", hash = "sha256:2d872e3c9d5d075a2e104540965a1cf898b52274a5923936e5bfddb58c59c7c2"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:10bb90fb4d523a2aa67773d4ff2b833ec00857f5912bafcfd5f5414e45280fb1"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:874a52ecab70af13e899f7847b3e074eeb16ebac5615665db33bce8a1009cf33"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a29b3ca4ec9defec6d42bf5feb36bb5817ba3c0230dd83b4edf4bf02684cd0ae"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:12b11322ea00ad8db8c46f18b7dfc47ae215e4df55b46c67a94b4effbaec7094"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_24_ppc64le.whl", hash = "sha256:53293533fcbb94c202b7c800a12c873cfe24599656b341f56e71dd2b557be063"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c381bda330ddf2fccbafab789d83ebc6c53db126e4383e73794c74eedce855ef"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9d29409b625a143649d03d0fd7b57e4b92e0ecad9726ba682244b73be91d2fdb"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:183a517a3a63503f70f808b58bfbf962f23d73b6dccddae5aa56152ef2bcb232"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:15c4e4cfa45f5a60599d9cec5f46cd7b1b29d86a6390ec23e8eebaae84e64554"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-win32.whl", hash = "sha256:adf20d9a67e0b6393eac162eb81fb10bc9130a80540f4df7e7355c2dd4af9fba"}, + {file = "psycopg2_binary-2.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:2f9ffd643bc7349eeb664eba8864d9e01f057880f510e4681ba40a6532f93c71"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:def68d7c21984b0f8218e8a15d514f714d96904265164f75f8d3a70f9c295667"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dffc08ca91c9ac09008870c9eb77b00a46b3378719584059c034b8945e26b272"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:280b0bb5cbfe8039205c7981cceb006156a675362a00fe29b16fbc264e242834"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:af9813db73395fb1fc211bac696faea4ca9ef53f32dc0cfa27e4e7cf766dcf24"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_24_ppc64le.whl", hash = "sha256:63638d875be8c2784cfc952c9ac34e2b50e43f9f0a0660b65e2a87d656b3116c"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ffb7a888a047696e7f8240d649b43fb3644f14f0ee229077e7f6b9f9081635bd"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0c9d5450c566c80c396b7402895c4369a410cab5a82707b11aee1e624da7d004"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = 
"sha256:d1c1b569ecafe3a69380a94e6ae09a4789bbb23666f3d3a08d06bbd2451f5ef1"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8fc53f9af09426a61db9ba357865c77f26076d48669f2e1bb24d85a22fb52307"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-win32.whl", hash = "sha256:6472a178e291b59e7f16ab49ec8b4f3bdada0a879c68d3817ff0963e722a82ce"}, + {file = "psycopg2_binary-2.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:35168209c9d51b145e459e05c31a9eaeffa9a6b0fd61689b48e07464ffd1a83e"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:47133f3f872faf28c1e87d4357220e809dfd3fa7c64295a4a148bcd1e6e34ec9"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91920527dea30175cc02a1099f331aa8c1ba39bf8b7762b7b56cbf54bc5cce42"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:887dd9aac71765ac0d0bac1d0d4b4f2c99d5f5c1382d8b770404f0f3d0ce8a39"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:1f14c8b0942714eb3c74e1e71700cbbcb415acbc311c730370e70c578a44a25c"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_24_ppc64le.whl", hash = "sha256:7af0dd86ddb2f8af5da57a976d27cd2cd15510518d582b478fbb2292428710b4"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:93cd1967a18aa0edd4b95b1dfd554cf15af657cb606280996d393dadc88c3c35"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bda845b664bb6c91446ca9609fc69f7db6c334ec5e4adc87571c34e4f47b7ddb"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:01310cf4cf26db9aea5158c217caa92d291f0500051a6469ac52166e1a16f5b7"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:99485cab9ba0fa9b84f1f9e1fef106f44a46ef6afdeec8885e0b88d0772b49e8"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-win32.whl", hash = "sha256:46f0e0a6b5fa5851bbd9ab1bc805eef362d3a230fbdfbc209f4a236d0a7a990d"}, + {file = "psycopg2_binary-2.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:accfe7e982411da3178ec690baaceaad3c278652998b2c45828aaac66cd8285f"}, ] py = [ {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, @@ -4881,8 +4922,8 @@ pytest-cov = [ {file = "pytest_cov-2.12.1-py2.py3-none-any.whl", hash = "sha256:261bb9e47e65bd099c89c3edf92972865210c36813f80ede5277dceb77a4a62a"}, ] pytest-forked = [ - {file = "pytest-forked-1.3.0.tar.gz", hash = "sha256:6aa9ac7e00ad1a539c41bec6d21011332de671e938c7637378ec9710204e37ca"}, - {file = "pytest_forked-1.3.0-py2.py3-none-any.whl", hash = "sha256:dc4147784048e70ef5d437951728825a131b81714b398d5d52f17c7c144d8815"}, + {file = "pytest-forked-1.4.0.tar.gz", hash = "sha256:8b67587c8f98cbbadfdd804539ed5455b6ed03802203485dd2f53c1422d7440e"}, + {file = "pytest_forked-1.4.0-py3-none-any.whl", hash = "sha256:bbbb6717efc886b9d64537b41fb1497cfaf3c9601276be8da2cccfea5a3c8ad8"}, ] pytest-sanic = [] pytest-timeout = [ @@ -4890,8 +4931,8 @@ pytest-timeout = [ {file = "pytest_timeout-1.4.2-py2.py3-none-any.whl", hash = "sha256:541d7aa19b9a6b4e475c759fd6073ef43d7cdc9a92d95644c260076eb257a063"}, ] pytest-xdist = [ - {file = "pytest-xdist-2.4.0.tar.gz", hash = "sha256:89b330316f7fc475f999c81b577c2b926c9569f3d397ae432c0c2e2496d61ff9"}, - {file = "pytest_xdist-2.4.0-py3-none-any.whl", hash = 
"sha256:7b61ebb46997a0820a263553179d6d1e25a8c50d8a8620cd1aa1e20e3be99168"}, + {file = "pytest-xdist-2.5.0.tar.gz", hash = "sha256:4580deca3ff04ddb2ac53eba39d76cb5dd5edeac050cb6fbc768b0dd712b4edf"}, + {file = "pytest_xdist-2.5.0-py3-none-any.whl", hash = "sha256:6fe5c74fec98906deb8f2d2b616b5c782022744978e7bd4695d39c8f42d0ce65"}, ] python-crfsuite = [ {file = "python-crfsuite-0.9.7.tar.gz", hash = "sha256:3b4538d2ce5007e4e42005818247bf43ade89ef08a66d158462e2f7c5d63cee7"}, @@ -4931,12 +4972,12 @@ python-dateutil = [ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, ] python-engineio = [ - {file = "python-engineio-4.3.0.tar.gz", hash = "sha256:fed35eeacfa21f53f1fc05ef0cadd65a50780364da3a2be7650eb92f928fdb11"}, - {file = "python_engineio-4.3.0-py3-none-any.whl", hash = "sha256:ad06a975f7e14cb3bb7137cbf70fd883804484d29acd58004d1db1e2a7fc0ad3"}, + {file = "python-engineio-4.3.1.tar.gz", hash = "sha256:6e1d26977ffefe3b7da1b5df7a8750aedc7686da8201cd90daf36693db122489"}, + {file = "python_engineio-4.3.1-py3-none-any.whl", hash = "sha256:85986067cb9f7695347954d4e03491f7d45152c5428c07109a9707e04e8942cb"}, ] python-socketio = [ - {file = "python-socketio-5.5.0.tar.gz", hash = "sha256:ce972ea1b82aa1811fa10d30cf0d5c251b9a1558c3d66829b6fe70854bcccf0b"}, - {file = "python_socketio-5.5.0-py3-none-any.whl", hash = "sha256:ca28a0ff0ca5dd05ec5ba4ee2572fe06b96d6f0bc7df384d8b50fbbc06986134"}, + {file = "python-socketio-5.5.1.tar.gz", hash = "sha256:ac8e64d59a15d1c31a4fe8434f4ff16d0f640c824ba517dce7ca99e95f0cd36a"}, + {file = "python_socketio-5.5.1-py3-none-any.whl", hash = "sha256:d0b98474064ac239a618649ca67f5288827705d36bd5f7615a473b37965baf61"}, ] pytz = [ {file = "pytz-2021.3-py2.py3-none-any.whl", hash = "sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c"}, @@ -5092,8 +5133,7 @@ s3transfer = [ {file = "s3transfer-0.5.0.tar.gz", hash = "sha256:50ed823e1dc5868ad40c8dc92072f757aa0e653a192845c94a3b676f4a62da4c"}, ] sacremoses = [ - {file = "sacremoses-0.0.46-py3-none-any.whl", hash = "sha256:f95f80d09d3501fed5c1d3056d9212b40599b08cb27f185d38ff0063be8ddd09"}, - {file = "sacremoses-0.0.46.tar.gz", hash = "sha256:4b1fe8813915c7a2647728a46ebbf0d1a65eabb7ca05ba10efeb0548547eea38"}, + {file = "sacremoses-0.0.47-py2.py3-none-any.whl", hash = "sha256:7622c6e9fe12d45b7acf4528451bd054c1557c1f6779398f9cd9f28332d92a0b"}, ] sanic = [ {file = "sanic-21.9.3-py3-none-any.whl", hash = "sha256:354fbc9e5382df23989752067a57d042aef99bf5a8e85593a3e0112276bff9e8"}, @@ -5108,8 +5148,8 @@ sanic-jwt = [ {file = "sanic_jwt-1.7.0-py3-none-any.whl", hash = "sha256:1dbcfec93cef77a41835aa38eae27780fc4255b9c1b067674ce2f0d2f5d45353"}, ] sanic-plugin-toolkit = [ - {file = "sanic-plugin-toolkit-1.2.0.tar.gz", hash = "sha256:5d477e6c1832968d43c7112e1a445afc98153fa05236c4db32b68e2cfff86001"}, - {file = "sanic_plugin_toolkit-1.2.0-py3-none-any.whl", hash = "sha256:3ab62af7c2d3e73d37d217976ec11f3683bde588d647e883a42763a894058b50"}, + {file = "sanic-plugin-toolkit-1.2.1.tar.gz", hash = "sha256:9f6eeca2e28b915dba4be8584ebb5a5f76a3f6895fe34572042ec275b13e41e1"}, + {file = "sanic_plugin_toolkit-1.2.1-py3-none-any.whl", hash = "sha256:aabea0dc1fd71969567c6a0fa419b55a727c0a5be372f166560e0cdbb9c30abb"}, ] sanic-routing = [ {file = "sanic-routing-0.7.2.tar.gz", hash = "sha256:139ce88b3f054e7aa336e2ecc8459837092b103b275d3a97609a34092c55374d"}, @@ -5155,9 +5195,6 @@ scikit-learn = [ {file = "scikit_learn-0.24.2-cp39-cp39-win_amd64.whl", hash = 
"sha256:40556bea1ef26ef54bc678d00cf138a63069144a0b5f3a436eecd8f3468b903e"}, ] scipy = [ - {file = "scipy-1.7.3-1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:c9e04d7e9b03a8a6ac2045f7c5ef741be86727d8f49c45db45f244bdd2bcff17"}, - {file = "scipy-1.7.3-1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:b0e0aeb061a1d7dcd2ed59ea57ee56c9b23dd60100825f98238c06ee5cc4467e"}, - {file = "scipy-1.7.3-1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:b78a35c5c74d336f42f44106174b9851c783184a85a3fe3e68857259b37b9ffb"}, {file = "scipy-1.7.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:173308efba2270dcd61cd45a30dfded6ec0085b4b6eb33b5eb11ab443005e088"}, {file = "scipy-1.7.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:21b66200cf44b1c3e86495e3a436fc7a26608f92b8d43d344457c54f1c024cbc"}, {file = "scipy-1.7.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceebc3c4f6a109777c0053dfa0282fddb8893eddfb0d598574acfb734a926168"}, @@ -5269,22 +5306,22 @@ sortedcontainers = [ {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"}, ] spacy = [ - {file = "spacy-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:eec8e66278fe50a1c0e488c05a583b8a755371999319e002ad08da2ba079b1f5"}, - {file = "spacy-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:208ea0ccc8015c5d4f3d11de627f6adec36058b508756bcdc9372d9ca8e1161d"}, - {file = "spacy-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:9a7c42a548b09d4c280283b33239a89c130f1662bb32b10270438a44dd3fb4bf"}, - {file = "spacy-3.2.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:729cd9bb2fbb04b43e5cffc6f344cc3451e521d78689afc66df4d1cf18e9393a"}, - {file = "spacy-3.2.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2cef9524d1516c143a586be61888b4851dad893e4d1f609050eacb3aa3cc84fb"}, - {file = "spacy-3.2.0-cp36-cp36m-win_amd64.whl", hash = "sha256:db79c01405a54bcc832a5308fbe1f060e004f13ea6a86d483a265d4632b90e72"}, - {file = "spacy-3.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0750590697ea4588b41cdb8db66baed37d46412f8f385572edb3756106005423"}, - {file = "spacy-3.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf2fddb68f2465e98a2ca756861f28a41e13452d7e3a0cd3286396267401d4f5"}, - {file = "spacy-3.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:5cb3ef623511d58759b87fbece2bf6f19d46ce594720c05230185fff6e65f28f"}, - {file = "spacy-3.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f908937ff8066d73162ea4649f644c312df8a8c2bff0219b98c75fa57054bcbc"}, - {file = "spacy-3.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ef56f77c47389451a9bfa56cc79ac9bd17385fc7ed9e1c08c0cf1ad41dd19a1"}, - {file = "spacy-3.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:a3956bce38773a685380b79aa89420269309332280588ca012634ed8831724aa"}, - {file = "spacy-3.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:19eb4f24791c53e9bf7ef5b4112dfe6b2bea9dfc859269344b437e37de2e4654"}, - {file = "spacy-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c5231003ad62e621da080a79eabdddcc38a11964d79c2b70f7a38bcb4b47aed"}, - {file = "spacy-3.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:a5746672265470300c099a2d2ea2442ba0b84870106d21c69568561db38bba1d"}, - {file = "spacy-3.2.0.tar.gz", hash = "sha256:68e54b2a14ce74eeecea9bfb0b9bdadf8a4a8157765dbefa7e50d25a1bf0f2f3"}, + {file = "spacy-3.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:a88d8f3b8849e3097e9e077e752dafcad8f5e557c9aba7099a83891e2153d141"}, + {file = "spacy-3.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91ceedc85846d3e36c0c95142ec2e675bc725c9d0035c0cdb21b40261d047516"}, + {file = "spacy-3.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:cb4ac55c27741a35e1af6f9718d1562d4000256a5673b0a2474b57998cc71825"}, + {file = "spacy-3.2.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:62b822ba097a2f2b61161306a7a9957ccc79dfed6a74ae7eb7572b3c3c153c03"}, + {file = "spacy-3.2.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00d177d58f49df8199bff58ed2bd37b25b1810f53bde41279030c506d8b3ac72"}, + {file = "spacy-3.2.1-cp36-cp36m-win_amd64.whl", hash = "sha256:868abe6d5f8ace6059ca4f4eeb6628c18ca40f9446b6fec50e86c42ac234c20d"}, + {file = "spacy-3.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a8edc17de654f676e183173cf54325d22a13a832bd15d77c75a561578930123d"}, + {file = "spacy-3.2.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaf6c35bf6d80dd2d001ff4ff2f6d73e676eec36fe3da4d006c14fe2f96a80f8"}, + {file = "spacy-3.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:cad15271b336c006ed6e4422b02283362cf92fc117886bccfb2d97291fd0c5e4"}, + {file = "spacy-3.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:88d8440ec629df87ab3cc645e292eb9046c03a5b3255141401d367e494d5a51e"}, + {file = "spacy-3.2.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c8ca5b80e7b0ea3d485a44573d28a18173bf6b808661e4d740d02a985c3bf0d"}, + {file = "spacy-3.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:84b97d86d4a9afbb0b6b7623061ae79ac607460d80aa20eee477e33faf019a67"}, + {file = "spacy-3.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e9f2a5049843a92f4020452c82b697921d26d95560fd185e2d0336cb2607a5df"}, + {file = "spacy-3.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d12b3e6568b7fde01b62ed42d0d578779774d091a33764d0f29def9b75363f81"}, + {file = "spacy-3.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:a9df95ba949774ce6d34a4dacc766068bd812b1081a99375aa7db63df04527dc"}, + {file = "spacy-3.2.1.tar.gz", hash = "sha256:f6ebac511627740a8ca2b117b91ef5515c8f0b2fb117a69ebe01d010dd4fc53c"}, ] spacy-legacy = [ {file = "spacy-legacy-3.0.8.tar.gz", hash = "sha256:b4725c5c161f0685ab4fce3fc912bc68aefdb7e102ba9848e852bb5842256c2f"}, @@ -5295,45 +5332,45 @@ spacy-loggers = [ {file = "spacy_loggers-1.0.1-py3-none-any.whl", hash = "sha256:5e610c980efb831fa428c24fd659e5dd850ea6140c9ed987efe0e8d26df3ee7c"}, ] sqlalchemy = [ - {file = "SQLAlchemy-1.4.27-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:6afa9e4e63f066e0fd90a21db7e95e988d96127f52bfb298a0e9bec6999357a9"}, - {file = "SQLAlchemy-1.4.27-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:ec1c908fa721f2c5684900cc8ff75555b1a5a2ae4f5a5694eb0e37a5263cea44"}, - {file = "SQLAlchemy-1.4.27-cp27-cp27m-win32.whl", hash = "sha256:0438bccc16349db2d5203598be6073175ce16d4e53b592d6e6cef880c197333e"}, - {file = "SQLAlchemy-1.4.27-cp27-cp27m-win_amd64.whl", hash = "sha256:435b1980c1333ffe3ab386ad28d7b209590b0fa83ea8544d853e7a22f957331b"}, - {file = "SQLAlchemy-1.4.27-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:486f7916ef77213103467924ef25f5ea1055ae901f385fe4d707604095fdf6a9"}, - {file = "SQLAlchemy-1.4.27-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:d81c84c9d2523b3ea20f8e3aceea68615768a7464c0f9a9899600ce6592ec570"}, - {file = 
"SQLAlchemy-1.4.27-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5881644fc51af7b232ab8d64f75c0f32295dfe88c2ee188023795cdbd4cf99b"}, - {file = "SQLAlchemy-1.4.27-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:24828c5e74882cf41516740c0b150702bee4c6817d87d5c3d3bafef2e6896f80"}, - {file = "SQLAlchemy-1.4.27-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7d0a1b1258efff7d7f2e6cfa56df580d09ba29d35a1e3f604f867e1f685feb2"}, - {file = "SQLAlchemy-1.4.27-cp310-cp310-win32.whl", hash = "sha256:aadc6d1e58e14010ae4764d1ba1fd0928dbb9423b27a382ea3a1444f903f4084"}, - {file = "SQLAlchemy-1.4.27-cp310-cp310-win_amd64.whl", hash = "sha256:9134e5810262203388b203c2022bbcbf1a22e89861eef9340e772a73dd9076fa"}, - {file = "SQLAlchemy-1.4.27-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:fa52534076394af7315306a8701b726a6521b591d95e8f4e5121c82f94790e8d"}, - {file = "SQLAlchemy-1.4.27-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2717ceae35e71de1f58b0d1ee7e773d3aab5c403c6e79e8d262277c7f7f95269"}, - {file = "SQLAlchemy-1.4.27-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2e93624d186ea7a738ada47314701c8830e0e4b021a6bce7fbe6f39b87ee1516"}, - {file = "SQLAlchemy-1.4.27-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:987fe2f84ceaf744fa0e48805152abe485a9d7002c9923b18a4b2529c7bff218"}, - {file = "SQLAlchemy-1.4.27-cp36-cp36m-win32.whl", hash = "sha256:2146ef996181e3d4dd20eaf1d7325eb62d6c8aa4dc1677c1872ddfa8561a47d9"}, - {file = "SQLAlchemy-1.4.27-cp36-cp36m-win_amd64.whl", hash = "sha256:ad8ec6b69d03e395db48df8991aa15fce3cd23e378b73e01d46a26a6efd5c26d"}, - {file = "SQLAlchemy-1.4.27-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:52f23a76544ed29573c0f3ee41f0ca1aedbab3a453102b60b540cc6fa55448ad"}, - {file = "SQLAlchemy-1.4.27-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd421a14edf73cfe01e8f51ed8966294ee3b3db8da921cacc88e497fd6e977af"}, - {file = "SQLAlchemy-1.4.27-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:10230364479429437f1b819a8839f1edc5744c018bfeb8d01320930f97695bc9"}, - {file = "SQLAlchemy-1.4.27-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78943451ab3ffd0e27876f9cea2b883317518b418f06b90dadf19394534637e9"}, - {file = "SQLAlchemy-1.4.27-cp37-cp37m-win32.whl", hash = "sha256:a81e40dfa50ed3c472494adadba097640bfcf43db160ed783132045eb2093cb1"}, - {file = "SQLAlchemy-1.4.27-cp37-cp37m-win_amd64.whl", hash = "sha256:015511c52c650eebf1059ed8a21674d9d4ae567ebfd80fc73f8252faccd71864"}, - {file = "SQLAlchemy-1.4.27-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:cc49fb8ff103900c20e4a9c53766c82a7ebbc183377fb357a8298bad216e9cdd"}, - {file = "SQLAlchemy-1.4.27-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9369f927f4d19b58322cfea8a51710a3f7c47a0e7f3398d94a4632760ecd74f6"}, - {file = "SQLAlchemy-1.4.27-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6510f4a5029643301bdfe56b61e806093af2101d347d485c42a5535847d2c699"}, - {file = 
"SQLAlchemy-1.4.27-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:771eca9872b47a629010665ff92de1c248a6979b8d1603daced37773d6f6e365"}, - {file = "SQLAlchemy-1.4.27-cp38-cp38-win32.whl", hash = "sha256:4d1d707b752137e6bf45720648e1b828d5e4881d690df79cca07f7217ea06365"}, - {file = "SQLAlchemy-1.4.27-cp38-cp38-win_amd64.whl", hash = "sha256:c035184af4e58e154b0977eea52131edd096e0754a88f7d5a847e7ccb3510772"}, - {file = "SQLAlchemy-1.4.27-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:bac949be7579fed824887eed6672f44b7c4318abbfb2004b2c6968818b535a2f"}, - {file = "SQLAlchemy-1.4.27-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ac8306e04275d382d6393e557047b0a9d7ddf9f7ca5da9b3edbd9323ea75bd9"}, - {file = "SQLAlchemy-1.4.27-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8327e468b1775c0dfabc3d01f39f440585bf4d398508fcbbe2f0d931c502337d"}, - {file = "SQLAlchemy-1.4.27-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b02eee1577976acb4053f83d32b7826424f8b9f70809fa756529a52c6537eda4"}, - {file = "SQLAlchemy-1.4.27-cp39-cp39-win32.whl", hash = "sha256:5beeff18b4e894f6cb73c8daf2c0d8768844ef40d97032bb187d75b1ec8de24b"}, - {file = "SQLAlchemy-1.4.27-cp39-cp39-win_amd64.whl", hash = "sha256:8dbe5f639e6d035778ebf700be6d573f82a13662c3c2c3aa0f1dba303b942806"}, - {file = "SQLAlchemy-1.4.27.tar.gz", hash = "sha256:d768359daeb3a86644f3854c6659e4496a3e6bba2b4651ecc87ce7ad415b320c"}, + {file = "SQLAlchemy-1.4.29-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:da64423c05256f4ab8c0058b90202053b201cbe3a081f3a43eb590cd554395ab"}, + {file = "SQLAlchemy-1.4.29-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:0fc4eec2f46b40bdd42112b3be3fbbf88e194bcf02950fbb88bcdc1b32f07dc7"}, + {file = "SQLAlchemy-1.4.29-cp27-cp27m-win32.whl", hash = "sha256:101d2e100ba9182c9039699588e0b2d833c54b3bad46c67c192159876c9f27ea"}, + {file = "SQLAlchemy-1.4.29-cp27-cp27m-win_amd64.whl", hash = "sha256:ceac84dd9abbbe115e8be0c817bed85d9fa639b4d294e7817f9e61162d5f766c"}, + {file = "SQLAlchemy-1.4.29-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:15b65887b6c324cad638c7671cb95985817b733242a7eb69edd7cdf6953be1e0"}, + {file = "SQLAlchemy-1.4.29-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:78abc507d17753ed434b6cc0c0693126279723d5656d9775bfcac966a99a899b"}, + {file = "SQLAlchemy-1.4.29-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb8c993706e86178ce15a6b86a335a2064f52254b640e7f53365e716423d33f4"}, + {file = "SQLAlchemy-1.4.29-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:804e22d5b6165a4f3f019dd9c94bec5687de985a9c54286b93ded9f7846b8c82"}, + {file = "SQLAlchemy-1.4.29-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56d9d62021946263d4478c9ca012fbd1805f10994cb615c88e7bfd1ae14604d8"}, + {file = "SQLAlchemy-1.4.29-cp310-cp310-win32.whl", hash = "sha256:027f356c727db24f3c75828c7feb426f87ce1241242d08958e454bd025810660"}, + {file = "SQLAlchemy-1.4.29-cp310-cp310-win_amd64.whl", hash = "sha256:debaf09a823061f88a8dee04949814cf7e82fb394c5bca22c780cb03172ca23b"}, + {file = "SQLAlchemy-1.4.29-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:dc27dcc6c72eb38be7f144e9c2c4372d35a3684d3a6dd43bd98c1238358ee17c"}, + {file = 
"SQLAlchemy-1.4.29-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4ddd4f2e247128c58bb3dd4489922874afce157d2cff0b2295d67fcd0f22494"}, + {file = "SQLAlchemy-1.4.29-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9ce960a1dc60524136cf6f75621588e2508a117e04a6e3eedb0968bd13b8c824"}, + {file = "SQLAlchemy-1.4.29-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5919e647e1d4805867ea556ed4967c68b4d8b266059fa35020dbaed8ffdd60f3"}, + {file = "SQLAlchemy-1.4.29-cp36-cp36m-win32.whl", hash = "sha256:886359f734b95ad1ef443b13bb4518bcade4db4f9553c9ce33d6d04ebda8d44e"}, + {file = "SQLAlchemy-1.4.29-cp36-cp36m-win_amd64.whl", hash = "sha256:e9cc6d844e24c307c3272677982a9b33816aeb45e4977791c3bdd47637a8d810"}, + {file = "SQLAlchemy-1.4.29-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:5e9cd33459afa69c88fa648e803d1f1245e3caa60bfe8b80a9595e5edd3bda9c"}, + {file = "SQLAlchemy-1.4.29-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eeaebceb24b46e884c4ad3c04f37feb178b81f6ce720af19bfa2592ca32fdef7"}, + {file = "SQLAlchemy-1.4.29-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e89347d3bd2ef873832b47e85f4bbd810a5e626c5e749d90a07638da100eb1c8"}, + {file = "SQLAlchemy-1.4.29-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a717c2e70fd1bb477161c4cc85258e41d978584fbe5522613618195f7e87d9b"}, + {file = "SQLAlchemy-1.4.29-cp37-cp37m-win32.whl", hash = "sha256:f74d6c05d2d163464adbdfbc1ab85048cc15462ff7d134b8aed22bd521e1faa5"}, + {file = "SQLAlchemy-1.4.29-cp37-cp37m-win_amd64.whl", hash = "sha256:621854dbb4d2413c759a5571564170de45ef37299df52e78e62b42e2880192e1"}, + {file = "SQLAlchemy-1.4.29-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:f3909194751bb6cb7c5511dd18bcf77e6e3f0b31604ed4004dffa9461f71e737"}, + {file = "SQLAlchemy-1.4.29-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd49d21d1f03c81fbec9080ecdc4486d5ddda67e7fbb75ebf48294465c022cdc"}, + {file = "SQLAlchemy-1.4.29-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e5f6959466a42b6569774c257e55f9cd85200d5b0ba09f0f5d8b5845349c5822"}, + {file = "SQLAlchemy-1.4.29-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0072f9887aabe66db23f818bbe950cfa1b6127c5cb769b00bcc07935b3adb0ad"}, + {file = "SQLAlchemy-1.4.29-cp38-cp38-win32.whl", hash = "sha256:ad618d687d26d4cbfa9c6fa6141d59e05bcdfc60cb6e1f1d3baa18d8c62fef5f"}, + {file = "SQLAlchemy-1.4.29-cp38-cp38-win_amd64.whl", hash = "sha256:878daecb6405e786b07f97e1c77a9cfbbbec17432e8a90c487967e32cfdecb33"}, + {file = "SQLAlchemy-1.4.29-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:e027bdf0a4cf6bd0a3ad3b998643ea374d7991bd117b90bf9982e41ceb742941"}, + {file = "SQLAlchemy-1.4.29-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5de7adfb91d351f44062b8dedf29f49d4af7cb765be65816e79223a4e31062b"}, + {file = "SQLAlchemy-1.4.29-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fbc6e63e481fa323036f305ada96a3362e1d60dd2bfa026cac10c3553e6880e9"}, + {file = "SQLAlchemy-1.4.29-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash 
= "sha256:7dd0502cb091660ad0d89c5e95a29825f37cde2a5249957838e975871fbffaad"}, + {file = "SQLAlchemy-1.4.29-cp39-cp39-win32.whl", hash = "sha256:37b46bfc4af3dc226acb6fa28ecd2e1fd223433dc5e15a2bad62bf0a0cbb4e8b"}, + {file = "SQLAlchemy-1.4.29-cp39-cp39-win_amd64.whl", hash = "sha256:08cfd35eecaba79be930c9bfd2e1f0c67a7e1314355d83a378f9a512b1cf7587"}, + {file = "SQLAlchemy-1.4.29.tar.gz", hash = "sha256:fa2bad14e1474ba649cfc969c1d2ec915dd3e79677f346bbfe08e93ef9020b39"}, ] srsly = [ - {file = "srsly-2.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:834229df7377386e9990fd245e1ae12a72152997fd159a782a798b638721a7b2"}, + {file = "srsly-2.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5e22bbc1a20abf749fa53adf101c36bc369ec63f496c7a44bf4f5f287d724900"}, {file = "srsly-2.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:004d29a5abc0fe632434359c0be170490a69c4dce2c3de8a769944c37da7bb4b"}, {file = "srsly-2.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7ced7ec4993b4d4ad73cc442f8f7a518368348054d510864b1aa149e8d71654d"}, {file = "srsly-2.4.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:801c7e6e32c6a4721ab78ab7dafd01074fdb144f4876c09b25305c98f95c470f"}, @@ -5371,7 +5408,7 @@ tensorboard-data-server = [ {file = "tensorboard_data_server-0.6.1-py3-none-manylinux2010_x86_64.whl", hash = "sha256:d8237580755e58eff68d1f3abefb5b1e39ae5c8b127cc40920f9c4fb33f4b98a"}, ] tensorboard-plugin-wit = [ - {file = "tensorboard_plugin_wit-1.8.0-py3-none-any.whl", hash = "sha256:2a80d1c551d741e99b2f197bb915d8a133e24adb8da1732b840041860f91183a"}, + {file = "tensorboard_plugin_wit-1.8.1-py3-none-any.whl", hash = "sha256:ff26bdd583d155aa951ee3b152b3d0cffae8005dc697f72b44a8e8c2a77a8cbe"}, ] tensorflow = [ {file = "tensorflow-2.6.1-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:dd0036a6fdb78958dcab3bdc4b3df23d7b9ab80ca51c03f2a84b4d354bbf36a1"}, @@ -5428,10 +5465,11 @@ termcolor = [ {file = "termcolor-1.1.0.tar.gz", hash = "sha256:1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b"}, ] terminaltables = [ - {file = "terminaltables-3.1.0.tar.gz", hash = "sha256:f3eb0eb92e3833972ac36796293ca0906e998dc3be91fbe1f8615b331b853b81"}, + {file = "terminaltables-3.1.10-py2.py3-none-any.whl", hash = "sha256:e4fdc4179c9e4aab5f674d80f09d76fa436b96fdc698a8505e0a36bf0804a874"}, + {file = "terminaltables-3.1.10.tar.gz", hash = "sha256:ba6eca5cb5ba02bba4c9f4f985af80c54ec3dccf94cfcd190154386255e47543"}, ] thinc = [ - {file = "thinc-8.0.13-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ad8794a76725b85847528fd1a56471d5ac00f4104da8efb065ba572238e381a2"}, + {file = "thinc-8.0.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f818b9f012169a11beb3561c43dc52080588e50cf495733e492efab8b9b4135e"}, {file = "thinc-8.0.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f520daf45b7f42a04363852df43be1b423ae42d9327709d74f6c3279b3f73778"}, {file = "thinc-8.0.13-cp310-cp310-win_amd64.whl", hash = "sha256:2b217059c9e126220b77e7d6c9da56912c4e1eb4e8a11af14f17752e198e88cc"}, {file = "thinc-8.0.13-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0f956c693d180209075703072fd226a24408cbe80eb67bd3b6eea407f61cb283"}, @@ -5475,6 +5513,10 @@ toml = [ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, ] +tomli = [ + {file = "tomli-1.2.3-py3-none-any.whl", hash = 
"sha256:e3069e4be3ead9668e21cb9b074cd948f7b3113fd9c8bba083f48247aab8b11c"}, + {file = "tomli-1.2.3.tar.gz", hash = "sha256:05b6166bff487dc068d322585c7ea4ef78deed501cc124060e0f238e89a9231f"}, +] toolz = [ {file = "toolz-0.11.2-py3-none-any.whl", hash = "sha256:a5700ce83414c64514d82d60bcda8aabfde092d1c1a8663f9200c07fdcc6da8f"}, {file = "toolz-0.11.2.tar.gz", hash = "sha256:6b312d5e15138552f1bda8a4e66c30e236c831b612b2bf0005f8a1df10a4bc33"}, @@ -5527,8 +5569,8 @@ typed-ast = [ {file = "typed_ast-1.4.3.tar.gz", hash = "sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65"}, ] typeguard = [ - {file = "typeguard-2.13.2-py3-none-any.whl", hash = "sha256:4f7da3d80dda5e42d6973f11f33da3542b8bf86edc12ba926b2dbad62adf3fcf"}, - {file = "typeguard-2.13.2.tar.gz", hash = "sha256:7e50071590ab997509aa0977609eb5cf9d73d84c1f416cb4fab78b77a9d15326"}, + {file = "typeguard-2.13.3-py3-none-any.whl", hash = "sha256:5e3e3be01e887e7eafae5af63d1f36c849aaa94e3a0112097312aabfa16284f1"}, + {file = "typeguard-2.13.3.tar.gz", hash = "sha256:00edaa8da3a133674796cf5ea87d9f4b4c367d77476e185e80251cc13dfbb8c4"}, ] typer = [ {file = "typer-0.4.0-py3-none-any.whl", hash = "sha256:d81169725140423d072df464cad1ff25ee154ef381aaf5b8225352ea187ca338"}, @@ -5543,16 +5585,20 @@ types-python-dateutil = [ {file = "types_python_dateutil-0.1.6-py3-none-any.whl", hash = "sha256:5b6241ea9fca2d8878cc152017d9524da62a7a856b98e31006e68b02aab47442"}, ] types-pytz = [ - {file = "types-pytz-2021.3.1.tar.gz", hash = "sha256:dffd77f3efecd3b1555f187a9bf3a638d55fac296700b829c41bd51ec72a6eb7"}, - {file = "types_pytz-2021.3.1-py3-none-any.whl", hash = "sha256:d58a0688094b768d8e21c044e45861cbcaecba0494fd5b9c5feb3e1739211606"}, + {file = "types-pytz-2021.3.4.tar.gz", hash = "sha256:101da53091013bb07403468c20d36930d749d3918054ac46f9c1bfc607dadf7d"}, + {file = "types_pytz-2021.3.4-py3-none-any.whl", hash = "sha256:ccfa2ed29f816e3de2f882541c06ad2791f808a79cfe38265411820190999f0f"}, ] types-requests = [ - {file = "types-requests-2.26.1.tar.gz", hash = "sha256:0893e112e1510bbb67f537941c92192de7472e51bf7f236e0e583866f0ed933e"}, - {file = "types_requests-2.26.1-py3-none-any.whl", hash = "sha256:853571b3accc188976c0f4feffcaebf6cdfc170082b5e43f3358aa78de61f531"}, + {file = "types-requests-2.27.7.tar.gz", hash = "sha256:f38bd488528cdcbce5b01dc953972f3cead0d060cfd9ee35b363066c25bab13c"}, + {file = "types_requests-2.27.7-py3-none-any.whl", hash = "sha256:2e0e100dd489f83870d4f61949d3a7eae4821e7bfbf46c57e463c38f92d473d4"}, ] types-setuptools = [ - {file = "types-setuptools-57.4.4.tar.gz", hash = "sha256:a3cbcbf3f02142bb5d3b5c5f5918f453b8752362b96d58aba2a5cfa43ba6d209"}, - {file = "types_setuptools-57.4.4-py3-none-any.whl", hash = "sha256:9fe4180548e5cbb44cc0d343a47e4dc1d6769c31e16447994788c28df169011f"}, + {file = "types-setuptools-57.4.7.tar.gz", hash = "sha256:9677d969b00ec1c14552f5be2b2b47a6fbea4d0ed4de0fdcee18abdaa0cc9267"}, + {file = "types_setuptools-57.4.7-py3-none-any.whl", hash = "sha256:ffda504687ea02d4b7751c0d1df517fbbcdc276836d90849e4f1a5f1ccd79f01"}, +] +types-urllib3 = [ + {file = "types-urllib3-1.26.7.tar.gz", hash = "sha256:cfd1fbbe4ba9a605ed148294008aac8a7b8b7472651d1cc357d507ae5962e3d2"}, + {file = "types_urllib3-1.26.7-py3-none-any.whl", hash = "sha256:3adcf2cb5981809091dbff456e6999fe55f201652d8c360f99997de5ac2f556e"}, ] typing-extensions = [ {file = "typing_extensions-3.7.4.3-py2-none-any.whl", hash = "sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f"}, @@ -5618,8 +5664,8 @@ uritemplate = [ {file = 
"uritemplate-4.1.1.tar.gz", hash = "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0"}, ] urllib3 = [ - {file = "urllib3-1.26.7-py2.py3-none-any.whl", hash = "sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844"}, - {file = "urllib3-1.26.7.tar.gz", hash = "sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece"}, + {file = "urllib3-1.26.8-py2.py3-none-any.whl", hash = "sha256:000ca7f471a233c2251c6c7023ee85305721bfdf18621ebff4fd17a8653427ed"}, + {file = "urllib3-1.26.8.tar.gz", hash = "sha256:0e7c33d9a63e7ddfcb86780aac87befc2fbddf46c58dbb487e0855f7ceec283c"}, ] uvloop = [ {file = "uvloop-0.14.0-cp35-cp35m-macosx_10_11_x86_64.whl", hash = "sha256:08b109f0213af392150e2fe6f81d33261bb5ce968a288eb698aad4f46eb711bd"}, @@ -5799,6 +5845,6 @@ yarl = [ {file = "yarl-1.7.2.tar.gz", hash = "sha256:45399b46d60c253327a460e99856752009fcee5f5d3c80b2f7c0cae1c38d56dd"}, ] zipp = [ - {file = "zipp-3.6.0-py3-none-any.whl", hash = "sha256:9fe5ea21568a0a70e50f273397638d39b03353731e6cbbb3fd8502a33fec40bc"}, - {file = "zipp-3.6.0.tar.gz", hash = "sha256:71c644c5369f4a6e07636f0aa966270449561fcea2e3d6747b8d23efaa9d7832"}, + {file = "zipp-3.7.0-py3-none-any.whl", hash = "sha256:b47250dd24f92b7dd6a0a8fc5244da14608f3ca90a5efcd37a3b1642fac9a375"}, + {file = "zipp-3.7.0.tar.gz", hash = "sha256:9f50f446828eb9d45b267433fd3e9da8d801f614129124863f9c51ebceafb87d"}, ] diff --git a/pyproject.toml b/pyproject.toml index 40c4ba393819..f2aaf167ba7d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -142,7 +142,7 @@ aioresponses = "^0.7.2" moto = "~=2.2.6" fakeredis = "^1.5.2" mongomock = "^3.18.0" -black = "^19.10b0" +black = "^21.7b0" flake8 = "^3.8.3" flake8-docstrings = "^1.5.0" google-cloud-storage = "^1.40.0" diff --git a/rasa/cli/arguments/evaluate.py b/rasa/cli/arguments/evaluate.py index 11833e0c4acc..189f486850a9 100644 --- a/rasa/cli/arguments/evaluate.py +++ b/rasa/cli/arguments/evaluate.py @@ -42,7 +42,7 @@ def set_markers_arguments(parser: argparse.ArgumentParser) -> None: ) add_endpoint_param( - parser, help_text="Configuration file for the tracker store as a yml file.", + parser, help_text="Configuration file for the tracker store as a yml file." 
) add_domain_param(parser) @@ -51,7 +51,7 @@ def set_markers_arguments(parser: argparse.ArgumentParser) -> None: def set_markers_first_n_arguments(parser: argparse.ArgumentParser) -> None: """Specifies arguments for `rasa evaluate markers first_n`.""" parser.add_argument( - "count", type=int, help="The number of trackers to extract markers from", + "count", type=int, help="The number of trackers to extract markers from" ) @@ -61,5 +61,5 @@ def set_markers_sample_arguments(parser: argparse.ArgumentParser) -> None: "--seed", type=int, help="Seed to use if selecting trackers by 'sample_n'" ) parser.add_argument( - "count", type=int, help="The number of trackers to extract markers from", + "count", type=int, help="The number of trackers to extract markers from" ) diff --git a/rasa/cli/arguments/run.py b/rasa/cli/arguments/run.py index 14bfcdc7ac91..bb56f72e603c 100644 --- a/rasa/cli/arguments/run.py +++ b/rasa/cli/arguments/run.py @@ -56,7 +56,7 @@ def add_server_arguments(parser: argparse.ArgumentParser) -> None: help="Store logs in specified file.", ) parser.add_argument( - "--use-syslog", action="store_true", help="Add syslog as a log handler", + "--use-syslog", action="store_true", help="Add syslog as a log handler" ) parser.add_argument( "--syslog-address", diff --git a/rasa/cli/data.py b/rasa/cli/data.py index 1e89d8175d40..3b56cc620815 100644 --- a/rasa/cli/data.py +++ b/rasa/cli/data.py @@ -8,10 +8,7 @@ from rasa.cli.arguments import data as arguments from rasa.cli.arguments import default_arguments import rasa.cli.utils -from rasa.shared.constants import ( - DEFAULT_DATA_PATH, - DEFAULT_CONFIG_PATH, -) +from rasa.shared.constants import DEFAULT_DATA_PATH, DEFAULT_CONFIG_PATH import rasa.shared.data from rasa.shared.importers.rasa import RasaFileImporter import rasa.shared.nlu.training_data.loading @@ -167,7 +164,7 @@ def validate_files(args: argparse.Namespace, stories_only: bool = False) -> None ) file_importer = RasaFileImporter( - domain_path=args.domain, training_data_paths=args.data, config_file=config, + domain_path=args.domain, training_data_paths=args.data, config_file=config ) validator = Validator.from_importer(file_importer) diff --git a/rasa/cli/evaluate.py b/rasa/cli/evaluate.py index afe63706002e..66ddc0c8cd52 100644 --- a/rasa/cli/evaluate.py +++ b/rasa/cli/evaluate.py @@ -211,4 +211,4 @@ def _create_tracker_loader( """ endpoints = AvailableEndpoints.read_endpoints(endpoint_config) tracker_store = TrackerStore.create(endpoints.tracker_store, domain=domain) - return MarkerTrackerLoader(tracker_store, strategy, count, seed,) + return MarkerTrackerLoader(tracker_store, strategy, count, seed) diff --git a/rasa/cli/scaffold.py b/rasa/cli/scaffold.py index 96e712a0c7b0..b5f0f1e50f56 100644 --- a/rasa/cli/scaffold.py +++ b/rasa/cli/scaffold.py @@ -208,7 +208,7 @@ def run(args: argparse.Namespace) -> None: path = ( questionary.text( "Please enter a path where the project will be " - "created [default: current directory]", + "created [default: current directory]" ) .skip_if(args.no_prompt, default="") .ask() diff --git a/rasa/cli/telemetry.py b/rasa/cli/telemetry.py index 139b5b09356a..f5b810fc760e 100644 --- a/rasa/cli/telemetry.py +++ b/rasa/cli/telemetry.py @@ -10,7 +10,7 @@ def add_subparser( - subparsers: SubParsersAction, parents: List[argparse.ArgumentParser], + subparsers: SubParsersAction, parents: List[argparse.ArgumentParser] ) -> None: """Add all telemetry tracking parsers. 
@@ -18,7 +18,6 @@ def add_subparser( subparsers: subparser we are going to attach to parents: Parent parsers, needed to ensure tree structure in argparse """ - telemetry_parser = subparsers.add_parser( "telemetry", parents=parents, diff --git a/rasa/cli/test.py b/rasa/cli/test.py index 0e326b5d1fab..4ecc93646053 100644 --- a/rasa/cli/test.py +++ b/rasa/cli/test.py @@ -183,7 +183,7 @@ async def run_nlu_test_async( data_path = rasa.cli.utils.get_validated_path(data_path, "nlu", DEFAULT_DATA_PATH) test_data_importer = TrainingDataImporter.load_from_dict( - training_data_paths=[data_path], domain_path=DEFAULT_DOMAIN_PATH, + training_data_paths=[data_path], domain_path=DEFAULT_DOMAIN_PATH ) nlu_data = test_data_importer.get_nlu_data() @@ -205,7 +205,7 @@ async def run_nlu_test_async( for file in config: try: validation_utils.validate_yaml_schema( - rasa.shared.utils.io.read_file(file), CONFIG_SCHEMA_FILE, + rasa.shared.utils.io.read_file(file), CONFIG_SCHEMA_FILE ) config_files.append(file) except YamlException: diff --git a/rasa/cli/x.py b/rasa/cli/x.py index 30590d7da58c..64e97c5c4872 100644 --- a/rasa/cli/x.py +++ b/rasa/cli/x.py @@ -423,7 +423,7 @@ def run_in_production(args: argparse.Namespace) -> None: _rasa_service(args, endpoints, None, credentials_path) -def _get_config_path(args: argparse.Namespace,) -> Optional[Text]: +def _get_config_path(args: argparse.Namespace) -> Optional[Text]: config_path = rasa.cli.utils.get_validated_path( args.config, "config", DEFAULT_CONFIG_PATH ) @@ -431,7 +431,7 @@ def _get_config_path(args: argparse.Namespace,) -> Optional[Text]: return config_path -def _get_domain_path(args: argparse.Namespace,) -> Optional[Text]: +def _get_domain_path(args: argparse.Namespace) -> Optional[Text]: domain_path = rasa.cli.utils.get_validated_path( args.domain, "domain", DEFAULT_DOMAIN_PATH ) diff --git a/rasa/core/actions/action.py b/rasa/core/actions/action.py index 34cffc065d2b..257d6ccaba6b 100644 --- a/rasa/core/actions/action.py +++ b/rasa/core/actions/action.py @@ -923,9 +923,9 @@ async def run( intent_to_affirm = latest_message.intent.get(INTENT_NAME_KEY) - intent_ranking: List["IntentPrediction"] = latest_message.parse_data.get( - INTENT_RANKING_KEY - ) or [] + intent_ranking: List["IntentPrediction"] = ( + latest_message.parse_data.get(INTENT_RANKING_KEY) or [] + ) if ( intent_to_affirm == DEFAULT_NLU_FALLBACK_INTENT_NAME and len(intent_ranking) > 1 @@ -1076,7 +1076,7 @@ async def _execute_custom_action( return [], executed_custom_actions slot_events = await self._run_custom_action( - custom_action, output_channel, nlg, tracker, domain, + custom_action, output_channel, nlg, tracker, domain ) executed_custom_actions.add(custom_action) @@ -1236,7 +1236,7 @@ def extract_slot_value_from_predefined_mapping( """Extracts slot value if slot has an applicable predefined mapping.""" should_fill_entity_slot = ( mapping_type == SlotMappingType.FROM_ENTITY - and SlotMapping.entity_is_desired(mapping, tracker,) + and SlotMapping.entity_is_desired(mapping, tracker) ) should_fill_intent_slot = mapping_type == SlotMappingType.FROM_INTENT diff --git a/rasa/core/actions/forms.py b/rasa/core/actions/forms.py index 09761483eb3f..5cd662f395d2 100644 --- a/rasa/core/actions/forms.py +++ b/rasa/core/actions/forms.py @@ -344,7 +344,7 @@ def _update_slot_values( return slot_values def _add_dynamic_slots_requested_by_dynamic_forms( - self, tracker: "DialogueStateTracker", domain: Domain, + self, tracker: "DialogueStateTracker", domain: Domain ) -> Set[Text]: required_slots = 
set(self.required_slots(domain)) requested_slot = self.get_slot_to_fill(tracker) @@ -355,7 +355,7 @@ def _add_dynamic_slots_requested_by_dynamic_forms( return required_slots def _get_slot_extractions( - self, tracker: "DialogueStateTracker", domain: Domain, + self, tracker: "DialogueStateTracker", domain: Domain ) -> Dict[Text, Any]: events_since_last_user_uttered = FormAction._get_events_since_last_user_uttered( tracker diff --git a/rasa/core/agent.py b/rasa/core/agent.py index eac8f7cbf204..444cb67b76e0 100644 --- a/rasa/core/agent.py +++ b/rasa/core/agent.py @@ -5,15 +5,7 @@ import os import tempfile from pathlib import Path -from typing import ( - Any, - Callable, - Dict, - List, - Optional, - Text, - Union, -) +from typing import Any, Callable, Dict, List, Optional, Text, Union import uuid import aiohttp @@ -30,10 +22,7 @@ from rasa.core.nlg import NaturalLanguageGenerator from rasa.core.policies.policy import PolicyPrediction from rasa.core.processor import MessageProcessor -from rasa.core.tracker_store import ( - FailSafeTrackerStore, - InMemoryTrackerStore, -) +from rasa.core.tracker_store import FailSafeTrackerStore, InMemoryTrackerStore from rasa.shared.core.trackers import DialogueStateTracker, EventVerbosity from rasa.exceptions import ModelNotFound from rasa.nlu.utils import is_url @@ -357,7 +346,7 @@ def load( return agent def load_model( - self, model_path: Union[Text, Path], fingerprint: Optional[Text] = None, + self, model_path: Union[Text, Path], fingerprint: Optional[Text] = None ) -> None: """Loads the agent's model and processor given a new model path.""" self.processor = MessageProcessor( @@ -419,7 +408,7 @@ async def parse_message(self, message_data: Text) -> Dict[Text, Any]: return await self.processor.parse_message(message) async def handle_message( - self, message: UserMessage, + self, message: UserMessage ) -> Optional[List[Dict[Text, Any]]]: """Handle a single message.""" if not self.is_ready(): @@ -446,7 +435,7 @@ def predict_next_with_tracker( return self.processor.predict_next_with_tracker(tracker, verbosity) @agent_must_be_ready - async def log_message(self, message: UserMessage,) -> DialogueStateTracker: + async def log_message(self, message: UserMessage) -> DialogueStateTracker: """Append a message to a dialogue - does not predict actions.""" return await self.processor.log_message(message) diff --git a/rasa/core/brokers/kafka.py b/rasa/core/brokers/kafka.py index f0b33dd545aa..ce1078cfccfb 100644 --- a/rasa/core/brokers/kafka.py +++ b/rasa/core/brokers/kafka.py @@ -142,7 +142,7 @@ def _create_producer(self) -> None: if self.security_protocol == "PLAINTEXT": authentication_params = dict( - security_protocol=self.security_protocol, ssl_check_hostname=False, + security_protocol=self.security_protocol, ssl_check_hostname=False ) elif self.security_protocol == "SASL_PLAINTEXT": authentication_params = dict( diff --git a/rasa/core/channels/socketio.py b/rasa/core/channels/socketio.py index 52fdf8c0289f..bdae6e1aff9d 100644 --- a/rasa/core/channels/socketio.py +++ b/rasa/core/channels/socketio.py @@ -3,11 +3,7 @@ from typing import Any, Awaitable, Callable, Dict, Iterable, List, Optional, Text import rasa.core.channels.channel -from rasa.core.channels.channel import ( - InputChannel, - OutputChannel, - UserMessage, -) +from rasa.core.channels.channel import InputChannel, OutputChannel, UserMessage import rasa.shared.utils.io from sanic import Blueprint, response, Sanic from sanic.request import Request @@ -197,7 +193,7 @@ async def connect(sid: Text, environ: 
Dict, auth: Optional[Dict]) -> bool: jwt_payload = None if auth and auth.get("token"): jwt_payload = rasa.core.channels.channel.decode_bearer_token( - auth.get("token"), self.jwt_key, self.jwt_algorithm, + auth.get("token"), self.jwt_key, self.jwt_algorithm ) if jwt_payload: diff --git a/rasa/core/channels/twilio_voice.py b/rasa/core/channels/twilio_voice.py index 2f83f3194bd5..7faae1ee5ed9 100644 --- a/rasa/core/channels/twilio_voice.py +++ b/rasa/core/channels/twilio_voice.py @@ -239,9 +239,7 @@ async def receive(request: Request) -> HTTPResponse: # determine the response. if text is not None: await on_new_message( - UserMessage( - text, collector, sender_id, input_channel=input_channel, - ) + UserMessage(text, collector, sender_id, input_channel=input_channel) ) twilio_response = self._build_twilio_voice_response(collector.messages) @@ -313,7 +311,7 @@ def name(cls) -> Text: return "twilio_voice" @staticmethod - def _emoji_warning(text: Text,) -> None: + def _emoji_warning(text: Text) -> None: """Raises a warning if text contains an emoji.""" emoji_regex = rasa.utils.io.get_emoji_regex() if emoji_regex.findall(text): diff --git a/rasa/core/evaluation/marker_base.py b/rasa/core/evaluation/marker_base.py index 76ad1ffe747f..225af97c7031 100644 --- a/rasa/core/evaluation/marker_base.py +++ b/rasa/core/evaluation/marker_base.py @@ -567,11 +567,11 @@ def from_config(config: Any, name: Optional[Text] = None) -> Marker: tag, _ = MarkerRegistry.get_non_negated_tag(tag_or_negated_tag=tag) if tag in MarkerRegistry.operator_tag_to_marker_class: marker = OperatorMarker.from_tag_and_sub_config( - tag=tag, sub_config=sub_marker_config, name=name, + tag=tag, sub_config=sub_marker_config, name=name ) elif tag in MarkerRegistry.condition_tag_to_marker_class: marker = ConditionMarker.from_tag_and_sub_config( - tag=tag, sub_config=sub_marker_config, name=name, + tag=tag, sub_config=sub_marker_config, name=name ) else: raise InvalidMarkerConfig( @@ -671,7 +671,7 @@ def _save_results(path: Path, results: Dict[Text, List[SessionEvaluation]]) -> N @staticmethod def _write_relevant_events( - writer: WriteRow, sender_id: Text, session_idx: int, session: SessionEvaluation, + writer: WriteRow, sender_id: Text, session_idx: int, session: SessionEvaluation ) -> None: for marker_name, meta_data_per_relevant_event in session.items(): for event_meta_data in meta_data_per_relevant_event: @@ -775,7 +775,7 @@ def max_depth(self) -> int: @staticmethod def from_tag_and_sub_config( - tag: Text, sub_config: Any, name: Optional[Text] = None, + tag: Text, sub_config: Any, name: Optional[Text] = None ) -> OperatorMarker: """Creates an operator marker from the given config. 
diff --git a/rasa/core/evaluation/marker_stats.py b/rasa/core/evaluation/marker_stats.py index a443dae7b588..2812a0a811ab 100644 --- a/rasa/core/evaluation/marker_stats.py +++ b/rasa/core/evaluation/marker_stats.py @@ -188,13 +188,7 @@ def per_session_statistics_to_csv( @staticmethod def _header() -> List[Text]: - return [ - "sender_id", - "session_idx", - "marker", - "statistic", - "value", - ] + return ["sender_id", "session_idx", "marker", "statistic", "value"] def _write_overview(self, table_writer: WriteRow) -> None: special_sender_idx = self.ALL_SENDERS diff --git a/rasa/core/featurizers/precomputation.py b/rasa/core/featurizers/precomputation.py index 474d9e01a5a1..febbb8abe506 100644 --- a/rasa/core/featurizers/precomputation.py +++ b/rasa/core/featurizers/precomputation.py @@ -324,7 +324,7 @@ def create( return cls() def convert_for_training( - self, domain: Domain, story_graph: StoryGraph, + self, domain: Domain, story_graph: StoryGraph ) -> TrainingData: """Creates de-duplicated training data. diff --git a/rasa/core/featurizers/single_state_featurizer.py b/rasa/core/featurizers/single_state_featurizer.py index 1c0ce59677bf..c9e657aed473 100644 --- a/rasa/core/featurizers/single_state_featurizer.py +++ b/rasa/core/featurizers/single_state_featurizer.py @@ -84,9 +84,7 @@ def _create_entity_tag_specs( ) ] - def prepare_for_training( - self, domain: Domain, bilou_tagging: bool = False, - ) -> None: + def prepare_for_training(self, domain: Domain, bilou_tagging: bool = False) -> None: """Gets necessary information for featurization from domain. Args: @@ -258,7 +256,7 @@ def encode_state( if state_type == PREVIOUS_ACTION: state_features.update( self._extract_state_features( - sub_state, precomputations=precomputations, sparse=True, + sub_state, precomputations=precomputations, sparse=True ) ) # featurize user only if it is "real" user input, @@ -267,7 +265,7 @@ def encode_state( state_features.update( self._extract_state_features( - sub_state, precomputations=precomputations, sparse=True, + sub_state, precomputations=precomputations, sparse=True ) ) if sub_state.get(ENTITIES): diff --git a/rasa/core/featurizers/tracker_featurizers.py b/rasa/core/featurizers/tracker_featurizers.py index 95a23e0ef044..1abb310f4de3 100644 --- a/rasa/core/featurizers/tracker_featurizers.py +++ b/rasa/core/featurizers/tracker_featurizers.py @@ -155,7 +155,7 @@ def _create_entity_tags( return [ [ self.state_featurizer.encode_entities( - entity_data, precomputations, bilou_tagging, + entity_data, precomputations, bilou_tagging ) for entity_data in trackers_entities ] @@ -256,7 +256,7 @@ def training_states_labels_and_entities( ) def prepare_for_featurization( - self, domain: Domain, bilou_tagging: bool = False, + self, domain: Domain, bilou_tagging: bool = False ) -> None: """Ensures that the featurizer is ready to be called during training. diff --git a/rasa/core/http_interpreter.py b/rasa/core/http_interpreter.py index fed2bfaa8fec..c65d9af26ce4 100644 --- a/rasa/core/http_interpreter.py +++ b/rasa/core/http_interpreter.py @@ -22,7 +22,7 @@ def __init__(self, endpoint_config: Optional[EndpointConfig] = None) -> None: else: self.endpoint_config = EndpointConfig(constants.DEFAULT_SERVER_URL) - async def parse(self, message: UserMessage,) -> Dict[Text, Any]: + async def parse(self, message: UserMessage) -> Dict[Text, Any]: """Parse a text message. Return a default value if the parsing of the text failed. 
diff --git a/rasa/core/migrate.py b/rasa/core/migrate.py index 180ab6f250ea..89a7691ab52f 100644 --- a/rasa/core/migrate.py +++ b/rasa/core/migrate.py @@ -5,10 +5,7 @@ import rasa.shared.utils.io import rasa.shared.utils.cli -from rasa.shared.constants import ( - REQUIRED_SLOTS_KEY, - IGNORED_INTENTS, -) +from rasa.shared.constants import REQUIRED_SLOTS_KEY, IGNORED_INTENTS from rasa.shared.core.constants import ( ACTIVE_LOOP, REQUESTED_SLOT, @@ -16,12 +13,7 @@ MAPPING_TYPE, SLOT_MAPPINGS, ) -from rasa.shared.core.domain import ( - KEY_ENTITIES, - KEY_SLOTS, - KEY_FORMS, - Domain, -) +from rasa.shared.core.domain import KEY_ENTITIES, KEY_SLOTS, KEY_FORMS, Domain from rasa.shared.exceptions import RasaException @@ -114,7 +106,7 @@ def _migrate_form_slots( def _migrate_auto_fill( - slot_name: Text, properties: Dict[Text, Any], entities: List[Text], + slot_name: Text, properties: Dict[Text, Any], entities: List[Text] ) -> Dict[Text, Any]: if slot_name in entities and properties.get("auto_fill", True) is True: from_entity_mapping = { @@ -179,7 +171,7 @@ def _assemble_new_domain( def _write_final_domain( - domain_file: Path, new_forms: Dict, new_slots: Dict, out_file: Path, + domain_file: Path, new_forms: Dict, new_slots: Dict, out_file: Path ) -> None: if domain_file.is_dir(): for file in domain_file.iterdir(): diff --git a/rasa/core/nlg/response.py b/rasa/core/nlg/response.py index 6c2da000fd23..2da05d2130af 100644 --- a/rasa/core/nlg/response.py +++ b/rasa/core/nlg/response.py @@ -27,7 +27,7 @@ def __init__(self, responses: Dict[Text, List[Dict[Text, Any]]]) -> None: self.responses = responses def _matches_filled_slots( - self, filled_slots: Dict[Text, Any], response: Dict[Text, Any], + self, filled_slots: Dict[Text, Any], response: Dict[Text, Any] ) -> bool: """Checks if the conditional response variation matches the filled slots.""" constraints = response.get(RESPONSE_CONDITION) diff --git a/rasa/core/policies/ensemble.py b/rasa/core/policies/ensemble.py index 5a6327ecb5eb..657e423a9723 100644 --- a/rasa/core/policies/ensemble.py +++ b/rasa/core/policies/ensemble.py @@ -100,7 +100,7 @@ def combine_predictions_from_kwargs( value for value in kwargs.values() if isinstance(value, PolicyPrediction) ] return self.combine_predictions( - predictions=predictions, tracker=tracker, domain=domain, + predictions=predictions, tracker=tracker, domain=domain ) @abstractmethod diff --git a/rasa/core/policies/memoization.py b/rasa/core/policies/memoization.py index d90935319062..2f687de140b4 100644 --- a/rasa/core/policies/memoization.py +++ b/rasa/core/policies/memoization.py @@ -21,11 +21,7 @@ from rasa.core.featurizers.tracker_featurizers import MaxHistoryTrackerFeaturizer from rasa.core.featurizers.tracker_featurizers import FEATURIZER_FILE from rasa.shared.exceptions import FileIOException -from rasa.core.policies.policy import ( - PolicyPrediction, - Policy, - SupportedData, -) +from rasa.core.policies.policy import PolicyPrediction, Policy, SupportedData from rasa.shared.core.trackers import DialogueStateTracker from rasa.shared.core.generator import TrackerWithCachedStates from rasa.shared.utils.io import is_logging_disabled @@ -90,9 +86,7 @@ def __init__( lookup: Optional[Dict] = None, ) -> None: """Initialize the policy.""" - super().__init__( - config, model_storage, resource, execution_context, featurizer, - ) + super().__init__(config, model_storage, resource, execution_context, featurizer) self.lookup = lookup or {} def _create_lookup_from_states( @@ -458,7 +452,7 @@ def recall( def 
_get_max_applied_events_for_max_history( - tracker: DialogueStateTracker, max_history: Optional[int], + tracker: DialogueStateTracker, max_history: Optional[int] ) -> Optional[int]: """Computes the number of events in the tracker that correspond to max_history. @@ -485,7 +479,7 @@ def _get_max_applied_events_for_max_history( def _trim_tracker_by_max_history( - tracker: DialogueStateTracker, max_history: Optional[int], + tracker: DialogueStateTracker, max_history: Optional[int] ) -> DialogueStateTracker: """Removes events from the tracker until it has `max_history` actions. diff --git a/rasa/core/policies/policy.py b/rasa/core/policies/policy.py index 5a9400e1ef8c..342083acd3f8 100644 --- a/rasa/core/policies/policy.py +++ b/rasa/core/policies/policy.py @@ -39,12 +39,7 @@ POLICY_PRIORITY, POLICY_MAX_HISTORY, ) -from rasa.shared.core.constants import ( - USER, - SLOTS, - PREVIOUS_ACTION, - ACTIVE_LOOP, -) +from rasa.shared.core.constants import USER, SLOTS, PREVIOUS_ACTION, ACTIVE_LOOP import rasa.shared.utils.common @@ -422,7 +417,7 @@ def load( ) return cls( - config, model_storage, resource, execution_context, featurizer=featurizer, + config, model_storage, resource, execution_context, featurizer=featurizer ) def _default_predictions(self, domain: Domain) -> List[float]: diff --git a/rasa/core/policies/rule_policy.py b/rasa/core/policies/rule_policy.py index b7ec4b06b29c..f2c7251ea074 100644 --- a/rasa/core/policies/rule_policy.py +++ b/rasa/core/policies/rule_policy.py @@ -15,11 +15,7 @@ from rasa.shared.constants import DOCS_URL_RULES from rasa.shared.exceptions import RasaException import rasa.shared.utils.io -from rasa.shared.core.events import ( - LoopInterrupted, - UserUttered, - ActionExecuted, -) +from rasa.shared.core.events import LoopInterrupted, UserUttered, ActionExecuted from rasa.core.featurizers.tracker_featurizers import TrackerFeaturizer from rasa.core.policies.memoization import MemoizationPolicy from rasa.core.policies.policy import SupportedData, PolicyPrediction @@ -156,7 +152,7 @@ def __init__( config[POLICY_MAX_HISTORY] = None super().__init__( - config, model_storage, resource, execution_context, featurizer, lookup, + config, model_storage, resource, execution_context, featurizer, lookup ) self._fallback_action_name = config["core_fallback_action_name"] @@ -1236,9 +1232,7 @@ def persist(self) -> None: ) def _metadata(self) -> Dict[Text, Any]: - return { - "lookup": self.lookup, - } + return {"lookup": self.lookup} @classmethod def _metadata_filename(cls) -> Text: diff --git a/rasa/core/policies/ted_policy.py b/rasa/core/policies/ted_policy.py index f2083a3feb78..aa364a49f504 100644 --- a/rasa/core/policies/ted_policy.py +++ b/rasa/core/policies/ted_policy.py @@ -33,11 +33,7 @@ SPLIT_ENTITIES_BY_COMMA, SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE, ) -from rasa.core.policies.policy import ( - PolicyPrediction, - Policy, - SupportedData, -) +from rasa.core.policies.policy import PolicyPrediction, Policy, SupportedData from rasa.core.constants import ( DIALOGUE, POLICY_MAX_HISTORY, @@ -360,7 +356,7 @@ def __init__( ) -> None: """Declares instance variables with default values.""" super().__init__( - config, model_storage, resource, execution_context, featurizer=featurizer, + config, model_storage, resource, execution_context, featurizer=featurizer ) self.split_entities_config = rasa.utils.train_utils.init_split_entities( @@ -702,7 +698,7 @@ def train( ) model_data, label_ids = self._prepare_for_training( - training_trackers, domain, precomputations, + training_trackers, 
domain, precomputations ) if model_data.is_empty(): @@ -938,14 +934,13 @@ def persist_model_utilities(self, model_path: Path) -> None: model_path / f"{model_filename}.meta.pkl", self.config ) rasa.utils.io.pickle_dump( - model_path / f"{model_filename}.data_example.pkl", self.data_example, + model_path / f"{model_filename}.data_example.pkl", self.data_example ) rasa.utils.io.pickle_dump( - model_path / f"{model_filename}.fake_features.pkl", self.fake_features, + model_path / f"{model_filename}.fake_features.pkl", self.fake_features ) rasa.utils.io.pickle_dump( - model_path / f"{model_filename}.label_data.pkl", - dict(self._label_data.data), + model_path / f"{model_filename}.label_data.pkl", dict(self._label_data.data) ) entity_tag_specs = ( [tag_spec._asdict() for tag_spec in self._entity_tag_specs] @@ -953,7 +948,7 @@ def persist_model_utilities(self, model_path: Path) -> None: else [] ) rasa.shared.utils.io.dump_obj_as_json_to_file( - model_path / f"{model_filename}.entity_tag_specs.json", entity_tag_specs, + model_path / f"{model_filename}.entity_tag_specs.json", entity_tag_specs ) @classmethod @@ -1020,14 +1015,14 @@ def load( try: with model_storage.read_from(resource) as model_path: return cls._load( - model_path, config, model_storage, resource, execution_context, + model_path, config, model_storage, resource, execution_context ) except ValueError: logger.debug( f"Failed to load {cls.__class__.__name__} from model storage. Resource " f"'{resource.name}' doesn't exist." ) - return cls(config, model_storage, resource, execution_context,) + return cls(config, model_storage, resource, execution_context) @classmethod def _load( @@ -1633,7 +1628,7 @@ def _encode_real_features_per_attribute( # combined batch dimension and dialogue length x 1 x units attribute_features = tf.expand_dims( self._last_token( - attribute_features, combined_sentence_sequence_feature_lengths, + attribute_features, combined_sentence_sequence_feature_lengths ), axis=1, ) diff --git a/rasa/core/policies/unexpected_intent_policy.py b/rasa/core/policies/unexpected_intent_policy.py index 5a28d1334042..eae372ade64e 100644 --- a/rasa/core/policies/unexpected_intent_policy.py +++ b/rasa/core/policies/unexpected_intent_policy.py @@ -106,11 +106,7 @@ NAME, ) from rasa.utils.tensorflow import layers -from rasa.utils.tensorflow.model_data import ( - RasaModelData, - FeatureArray, - Data, -) +from rasa.utils.tensorflow.model_data import RasaModelData, FeatureArray, Data import rasa.utils.io as io_utils from rasa.core.exceptions import RasaCoreException @@ -175,9 +171,9 @@ def get_default_config() -> Dict[Text, Any]: # the dialogue transformer encoder. 
ENCODING_DIMENSION: 50, # Number of units in transformer encoders - TRANSFORMER_SIZE: {TEXT: 128, DIALOGUE: 128,}, + TRANSFORMER_SIZE: {TEXT: 128, DIALOGUE: 128}, # Number of layers in transformer encoders - NUM_TRANSFORMER_LAYERS: {TEXT: 1, DIALOGUE: 1,}, + NUM_TRANSFORMER_LAYERS: {TEXT: 1, DIALOGUE: 1}, # Number of attention heads in transformer NUM_HEADS: 4, # If 'True' use key relative embeddings in attention @@ -373,7 +369,7 @@ def _assemble_label_data( label_data = RasaModelData() label_data.add_data(attribute_data, key_prefix=f"{LABEL_KEY}_") label_data.add_lengths( - f"{LABEL}_{INTENT}", SEQUENCE_LENGTH, f"{LABEL}_{INTENT}", SEQUENCE, + f"{LABEL}_{INTENT}", SEQUENCE_LENGTH, f"{LABEL}_{INTENT}", SEQUENCE ) label_ids = np.arange(len(domain.intents)) label_data.add_features( @@ -523,7 +519,7 @@ def _collect_action_metadata( query_intent_index = domain.intents.index(query_intent) def _compile_metadata_for_label( - label_name: Text, similarity_score: float, threshold: Optional[float], + label_name: Text, similarity_score: float, threshold: Optional[float] ) -> "RankingCandidateMetadata": severity = float(threshold - similarity_score) if threshold else None return { @@ -630,7 +626,7 @@ def predict_action_probabilities( ) @staticmethod - def _should_skip_prediction(tracker: DialogueStateTracker, domain: Domain,) -> bool: + def _should_skip_prediction(tracker: DialogueStateTracker, domain: Domain) -> bool: """Checks if the policy should skip making a prediction. A prediction can be skipped if: @@ -755,7 +751,7 @@ def _check_unlikely_intent( @staticmethod def _collect_label_id_grouped_scores( - output_scores: Dict[Text, np.ndarray], label_ids: np.ndarray, + output_scores: Dict[Text, np.ndarray], label_ids: np.ndarray ) -> Dict[int, Dict[Text, List[float]]]: """Collects similarities predicted for each label id. @@ -925,7 +921,7 @@ class IntentTED(TED): """ def _prepare_dot_product_loss( - self, name: Text, scale_loss: bool, prefix: Text = "loss", + self, name: Text, scale_loss: bool, prefix: Text = "loss" ) -> None: self._tf_layers[f"{prefix}.{name}"] = self.dot_product_loss_layer( self.config[NUM_NEG], diff --git a/rasa/core/processor.py b/rasa/core/processor.py index 26d2c0da7cff..7056e5024a5d 100644 --- a/rasa/core/processor.py +++ b/rasa/core/processor.py @@ -155,7 +155,7 @@ async def handle_message( return None async def run_action_extract_slots( - self, output_channel: OutputChannel, tracker: DialogueStateTracker, + self, output_channel: OutputChannel, tracker: DialogueStateTracker ) -> DialogueStateTracker: """Run action to extract slots and update the tracker accordingly. @@ -167,7 +167,7 @@ async def run_action_extract_slots( the given (updated) tracker """ action_extract_slots = rasa.core.actions.action.action_for_name_or_text( - ACTION_EXTRACT_SLOTS, self.domain, self.action_endpoint, + ACTION_EXTRACT_SLOTS, self.domain, self.action_endpoint ) extraction_events = await action_extract_slots.run( output_channel, self.nlg, tracker, self.domain @@ -711,7 +711,7 @@ def _should_handle_message(tracker: DialogueStateTracker) -> bool: ) def is_action_limit_reached( - self, tracker: DialogueStateTracker, should_predict_another_action: bool, + self, tracker: DialogueStateTracker, should_predict_another_action: bool ) -> bool: """Check whether the maximum number of predictions has been met. 
@@ -738,7 +738,7 @@ def is_action_limit_reached( ) async def _run_prediction_loop( - self, output_channel: OutputChannel, tracker: DialogueStateTracker, + self, output_channel: OutputChannel, tracker: DialogueStateTracker ) -> None: # keep taking actions decided by the policy until it chooses to 'listen' should_predict_another_action = True @@ -988,7 +988,7 @@ def _predict_next_with_tracker( raise ValueError("Cannot predict next action if there is no core target.") results = self.graph_runner.run( - inputs={PLACEHOLDER_TRACKER: tracker}, targets=[target], + inputs={PLACEHOLDER_TRACKER: tracker}, targets=[target] ) policy_prediction = results[target] return policy_prediction diff --git a/rasa/core/run.py b/rasa/core/run.py index 768a79cb9641..692683e1dc92 100644 --- a/rasa/core/run.py +++ b/rasa/core/run.py @@ -96,7 +96,7 @@ def configure_app( ) -> Sanic: """Run the agent.""" rasa.core.utils.configure_file_logging( - logger, log_file, use_syslog, syslog_address, syslog_port, syslog_protocol, + logger, log_file, use_syslog, syslog_address, syslog_port, syslog_protocol ) if enable_api: @@ -215,7 +215,7 @@ def serve_application( ) rasa.utils.common.update_sanic_log_level( - log_file, use_syslog, syslog_address, syslog_port, syslog_protocol, + log_file, use_syslog, syslog_address, syslog_port, syslog_protocol ) app.run( diff --git a/rasa/core/test.py b/rasa/core/test.py index d31128376911..54b15679eacb 100644 --- a/rasa/core/test.py +++ b/rasa/core/test.py @@ -843,7 +843,7 @@ async def _predict_tracker_actions( prediction, entity_result, ) = await _collect_action_executed_predictions( - processor, partial_tracker, event, fail_on_prediction_errors, + processor, partial_tracker, event, fail_on_prediction_errors ) if entity_result: policy_entity_results.append(entity_result) @@ -993,9 +993,7 @@ async def _collect_story_predictions( else: accuracy = 0 - _log_evaluation_table( - [1] * len(completed_trackers), "CONVERSATION", accuracy, - ) + _log_evaluation_table([1] * len(completed_trackers), "CONVERSATION", accuracy) return ( StoryEvaluation( diff --git a/rasa/core/tracker_store.py b/rasa/core/tracker_store.py index c2f927bc53c9..fb61f2ef7539 100644 --- a/rasa/core/tracker_store.py +++ b/rasa/core/tracker_store.py @@ -418,9 +418,9 @@ def get_or_create_table( except self.client.exceptions.ResourceNotFoundException: table = dynamo.create_table( TableName=self.table_name, - KeySchema=[{"AttributeName": "sender_id", "KeyType": "HASH"},], + KeySchema=[{"AttributeName": "sender_id", "KeyType": "HASH"}], AttributeDefinitions=[ - {"AttributeName": "sender_id", "AttributeType": "S"}, + {"AttributeName": "sender_id", "AttributeType": "S"} ], ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, ) @@ -443,9 +443,7 @@ def save(self, tracker: DialogueStateTracker) -> None: def serialise_tracker(self, tracker: "DialogueStateTracker") -> Dict: """Serializes the tracker, returns object with decimal types.""" d = tracker.as_dialogue().as_dict() - d.update( - {"sender_id": tracker.sender_id,} - ) + d.update({"sender_id": tracker.sender_id}) # DynamoDB cannot store `float`s, so we'll convert them to `Decimal`s return core_utils.replace_floats_with_decimals(d) @@ -890,7 +888,7 @@ def _create_database_and_update_engine(self, db: Text, engine_url: "URL") -> Non if not self.engine.dialect.name == "postgresql": rasa.shared.utils.io.raise_warning( - "The parameter 'login_db' can only be used with a postgres database.", + "The parameter 'login_db' can only be used with a postgres database." 
) return diff --git a/rasa/core/training/__init__.py b/rasa/core/training/__init__.py index 7b3d30b95925..414f945fdb44 100644 --- a/rasa/core/training/__init__.py +++ b/rasa/core/training/__init__.py @@ -8,7 +8,7 @@ def extract_story_graph( - resource_name: Text, domain: "Domain", exclusion_percentage: Optional[int] = None, + resource_name: Text, domain: "Domain", exclusion_percentage: Optional[int] = None ) -> "StoryGraph": """Loads training stories / rules from file or directory. @@ -25,7 +25,7 @@ def extract_story_graph( import rasa.shared.core.training_data.loading as core_loading story_steps = core_loading.load_data_from_resource( - resource_name, domain, exclusion_percentage=exclusion_percentage, + resource_name, domain, exclusion_percentage=exclusion_percentage ) return StoryGraph(story_steps) diff --git a/rasa/core/training/interactive.py b/rasa/core/training/interactive.py index 2e86909986ed..34a5a262ceeb 100644 --- a/rasa/core/training/interactive.py +++ b/rasa/core/training/interactive.py @@ -666,7 +666,7 @@ async def _request_action_from_user( await _print_history(conversation_id, endpoint) choices = [ - {"name": f'{a["score"]:03.2f} {a["action"]:40}', "value": a["action"],} + {"name": f'{a["score"]:03.2f} {a["action"]:40}', "value": a["action"]} for a in predictions ] @@ -1628,7 +1628,7 @@ async def visualisation_html(request: Request) -> HTTPResponse: # noinspection PyUnusedLocal @app.route("/visualization.dot", methods=["GET"]) - async def visualisation_png(request: Request,) -> HTTPResponse: + async def visualisation_png(request: Request) -> HTTPResponse: try: headers = {"Cache-Control": "no-cache"} return await response.file(os.path.abspath(image_path), headers=headers) diff --git a/rasa/core/training/story_conflict.py b/rasa/core/training/story_conflict.py index c6a2fb645807..dc43a111a7e5 100644 --- a/rasa/core/training/story_conflict.py +++ b/rasa/core/training/story_conflict.py @@ -170,7 +170,7 @@ def find_story_conflicts( # Iterate once more over all states and note the (unhashed) state, # for which a conflict occurs conflicts = _build_conflicts_from_states( - trackers, domain, max_history, conflicting_state_action_mapping, + trackers, domain, max_history, conflicting_state_action_mapping ) return conflicts @@ -266,7 +266,7 @@ def _build_conflicts_from_states( conflicts[hashed_state] = StoryConflict(element.sliced_states) conflicts[hashed_state].add_conflicting_action( - action=str(element.event), story_name=element.tracker.sender_id, + action=str(element.event), story_name=element.tracker.sender_id ) # Return list of conflicts that arise from unpredictable actions diff --git a/rasa/core/utils.py b/rasa/core/utils.py index ef626b7d4461..0638b36accde 100644 --- a/rasa/core/utils.py +++ b/rasa/core/utils.py @@ -3,15 +3,7 @@ import os from decimal import Decimal from pathlib import Path -from typing import ( - Any, - Dict, - Optional, - Set, - Text, - Tuple, - Union, -) +from typing import Any, Dict, Optional, Set, Text, Tuple, Union import numpy as np @@ -54,7 +46,7 @@ def configure_file_logging( ) socktype = SOCK_STREAM if syslog_protocol == TCP_PROTOCOL else SOCK_DGRAM syslog_handler = logging.handlers.SysLogHandler( - address=(syslog_address, syslog_port), socktype=socktype, + address=(syslog_address, syslog_port), socktype=socktype ) syslog_handler.setLevel(logger_obj.level) syslog_handler.setFormatter(formatter) diff --git a/rasa/engine/caching.py b/rasa/engine/caching.py index d5301385afd9..ea934367eb2a 100644 --- a/rasa/engine/caching.py +++ 
b/rasa/engine/caching.py @@ -227,9 +227,9 @@ def _drop_cache_entries_from_incompatible_versions(self) -> None: def _find_incompatible_cache_entries(self) -> List[LocalTrainingCache.CacheEntry]: with self._sessionmaker() as session: query_for_cache_entries = sa.select(self.CacheEntry) - all_entries: List[LocalTrainingCache.CacheEntry] = session.execute( - query_for_cache_entries - ).scalars().all() + all_entries: List[LocalTrainingCache.CacheEntry] = ( + session.execute(query_for_cache_entries).scalars().all() + ) return [ entry diff --git a/rasa/engine/graph.py b/rasa/engine/graph.py index 18747846f33c..b52a9ce3442c 100644 --- a/rasa/engine/graph.py +++ b/rasa/engine/graph.py @@ -6,10 +6,7 @@ import logging from typing import Any, Callable, Dict, List, Optional, Text, Type, Tuple -from rasa.engine.exceptions import ( - GraphComponentException, - GraphSchemaException, -) +from rasa.engine.exceptions import GraphComponentException, GraphSchemaException import rasa.shared.utils.common import rasa.utils.common from rasa.engine.storage.resource import Resource @@ -361,7 +358,7 @@ def __init__( self._component_class, self._constructor_name ) self._component_config: Dict[Text, Any] = rasa.utils.common.override_defaults( - self._component_class.get_default_config(), component_config, + self._component_class.get_default_config(), component_config ) self._fn_name: Text = fn_name self._fn: Callable = getattr(self._component_class, self._fn_name) diff --git a/rasa/engine/recipes/default_components.py b/rasa/engine/recipes/default_components.py index cc38dbf35a0f..eb0eb94930ec 100644 --- a/rasa/engine/recipes/default_components.py +++ b/rasa/engine/recipes/default_components.py @@ -30,10 +30,7 @@ from rasa.core.policies.ted_policy import TEDPolicy -from rasa.core.policies.memoization import ( - MemoizationPolicy, - AugmentedMemoizationPolicy, -) +from rasa.core.policies.memoization import MemoizationPolicy, AugmentedMemoizationPolicy from rasa.core.policies.rule_policy import RulePolicy from rasa.core.policies.unexpected_intent_policy import UnexpecTEDIntentPolicy diff --git a/rasa/engine/recipes/default_recipe.py b/rasa/engine/recipes/default_recipe.py index d00cf0b2439d..be4c102f465f 100644 --- a/rasa/engine/recipes/default_recipe.py +++ b/rasa/engine/recipes/default_recipe.py @@ -50,7 +50,7 @@ logger = logging.getLogger(__name__) -DEFAULT_PREDICT_KWARGS = dict(constructor_name="load", eager=True, is_target=False,) +DEFAULT_PREDICT_KWARGS = dict(constructor_name="load", eager=True, is_target=False) class DefaultV1RecipeRegisterException(RasaException): @@ -329,7 +329,7 @@ def _add_nlu_train_node( cli_parameters: Dict[Text, Any], ) -> Text: config_from_cli = self._extra_config_from_cli(cli_parameters, component, config) - model_provider_needs = self._get_model_provider_needs(train_nodes, component,) + model_provider_needs = self._get_model_provider_needs(train_nodes, component) train_node_name = f"train_{component_name}" train_nodes[train_node_name] = SchemaNode( @@ -394,7 +394,7 @@ def _add_nlu_process_node( resource_needs = {"resource": from_resource} model_provider_needs = self._get_model_provider_needs( - train_nodes, component_class, + train_nodes, component_class ) node_name = f"run_{component_name}" @@ -412,7 +412,7 @@ def _add_nlu_process_node( return node_name def _get_model_provider_needs( - self, nodes: Dict[Text, SchemaNode], component_class: Type[GraphComponent], + self, nodes: Dict[Text, SchemaNode], component_class: Type[GraphComponent] ) -> Dict[Text, Text]: model_provider_needs = {} 
component = self._from_registry(component_class.__name__) @@ -517,7 +517,7 @@ def _add_core_train_nodes( self._add_end_to_end_features_for_training(preprocessors, train_nodes) def _add_end_to_end_features_for_training( - self, preprocessors: List[Text], train_nodes: Dict[Text, SchemaNode], + self, preprocessors: List[Text], train_nodes: Dict[Text, SchemaNode] ) -> None: train_nodes["story_to_nlu_training_data_converter"] = SchemaNode( needs={ @@ -542,7 +542,7 @@ def _add_end_to_end_features_for_training( node_with_e2e_features = "end_to_end_features_provider" train_nodes[node_with_e2e_features] = SchemaNode( - needs={"messages": last_node_name,}, + needs={"messages": last_node_name}, uses=CoreFeaturizationCollector, constructor_name="create", fn="collect", @@ -591,7 +591,7 @@ def _create_predict_nodes( if self._use_core: self._add_core_predict_nodes( - predict_config, predict_nodes, train_nodes, preprocessors, + predict_config, predict_nodes, train_nodes, preprocessors ) return predict_nodes @@ -693,7 +693,7 @@ def _add_nlu_predict_node( ) -> Text: node_name = f"run_{component_name}" - model_provider_needs = self._get_model_provider_needs(predict_nodes, node.uses,) + model_provider_needs = self._get_model_provider_needs(predict_nodes, node.uses) predict_nodes[node_name] = dataclasses.replace( node, @@ -785,7 +785,7 @@ def _add_core_predict_nodes( ) def _add_end_to_end_features_for_inference( - self, predict_nodes: Dict[Text, SchemaNode], preprocessors: List[Text], + self, predict_nodes: Dict[Text, SchemaNode], preprocessors: List[Text] ) -> Text: predict_nodes["tracker_to_message_converter"] = SchemaNode( **DEFAULT_PREDICT_KWARGS, @@ -808,7 +808,7 @@ def _add_end_to_end_features_for_inference( node_with_e2e_features = "end_to_end_features_provider" predict_nodes[node_with_e2e_features] = SchemaNode( **DEFAULT_PREDICT_KWARGS, - needs={"messages": last_node_name,}, + needs={"messages": last_node_name}, uses=CoreFeaturizationCollector, fn="collect", config={}, diff --git a/rasa/engine/runner/dask.py b/rasa/engine/runner/dask.py index f51e15992eae..f2d5d7d511a3 100644 --- a/rasa/engine/runner/dask.py +++ b/rasa/engine/runner/dask.py @@ -6,12 +6,7 @@ import dask from rasa.engine.exceptions import GraphRunError -from rasa.engine.graph import ( - ExecutionContext, - GraphNode, - GraphNodeHook, - GraphSchema, -) +from rasa.engine.graph import ExecutionContext, GraphNode, GraphNodeHook, GraphSchema from rasa.engine.runner.interface import GraphRunner from rasa.engine.storage.storage import ModelStorage @@ -109,7 +104,7 @@ def run( raise GraphRunError("Error running runner.") from e @staticmethod - def _add_inputs_to_graph(inputs: Optional[Dict[Text, Any]], graph: Any,) -> None: + def _add_inputs_to_graph(inputs: Optional[Dict[Text, Any]], graph: Any) -> None: for input_name, input_value in inputs.items(): if isinstance(input_value, str) and input_value in graph.keys(): raise GraphRunError( diff --git a/rasa/engine/storage/local_model_storage.py b/rasa/engine/storage/local_model_storage.py index 54a765cf7714..84c2d48d3dfd 100644 --- a/rasa/engine/storage/local_model_storage.py +++ b/rasa/engine/storage/local_model_storage.py @@ -12,10 +12,7 @@ import rasa.utils.common import rasa.shared.utils.io -from rasa.engine.storage.storage import ( - ModelMetadata, - ModelStorage, -) +from rasa.engine.storage.storage import ModelMetadata, ModelStorage from rasa.engine.graph import GraphModelConfiguration from rasa.engine.storage.resource import Resource from rasa.exceptions import UnsupportedModelVersionError 
@@ -65,10 +62,7 @@ def from_model_archive( metadata = cls._load_metadata(temporary_directory) - return ( - cls(storage_path), - metadata, - ) + return (cls(storage_path), metadata) @classmethod def metadata_from_archive( @@ -85,14 +79,14 @@ def metadata_from_archive( @staticmethod def _extract_archive_to_directory( - model_archive_path: Union[Text, Path], temporary_directory: Path, + model_archive_path: Union[Text, Path], temporary_directory: Path ) -> None: with TarSafe.open(model_archive_path, mode="r:gz") as tar: tar.extractall(temporary_directory) LocalModelStorage._assert_not_rasa2_archive(temporary_directory) @staticmethod - def _assert_not_rasa2_archive(temporary_directory: Path,) -> None: + def _assert_not_rasa2_archive(temporary_directory: Path) -> None: fingerprint_file = Path(temporary_directory) / "fingerprint.json" if fingerprint_file.is_file(): serialized_fingerprint = rasa.shared.utils.io.read_json_file( @@ -107,9 +101,7 @@ def _initialize_model_storage_from_model_archive( temporary_directory: Path, storage_path: Path ) -> None: for path in (temporary_directory / MODEL_ARCHIVE_COMPONENTS_DIR).glob("*"): - shutil.move( - str(path), str(storage_path), - ) + shutil.move(str(path), str(storage_path)) @staticmethod def _load_metadata(directory: Path) -> ModelMetadata: @@ -183,7 +175,7 @@ def create_model_package( return model_metadata @staticmethod - def _persist_metadata(metadata: ModelMetadata, temporary_directory: Path,) -> None: + def _persist_metadata(metadata: ModelMetadata, temporary_directory: Path) -> None: rasa.shared.utils.io.dump_obj_as_json_to_file( temporary_directory / MODEL_ARCHIVE_METADATA_FILE, metadata.as_dict() diff --git a/rasa/engine/storage/storage.py b/rasa/engine/storage/storage.py index 7f3fd970882a..bf1937d0882e 100644 --- a/rasa/engine/storage/storage.py +++ b/rasa/engine/storage/storage.py @@ -139,7 +139,7 @@ class ModelMetadata: language: Optional[Text] training_type: TrainingType = TrainingType.BOTH - def __post_init__(self,) -> None: + def __post_init__(self) -> None: """Raises an exception when the meta data indicates an unsupported version. Raises: diff --git a/rasa/engine/training/components.py b/rasa/engine/training/components.py index b6064ed9dffe..f73255f45320 100644 --- a/rasa/engine/training/components.py +++ b/rasa/engine/training/components.py @@ -18,9 +18,7 @@ class PrecomputedValueProvider(GraphComponent): - values which were provided during the fingerprint run by input nodes """ - def __init__( - self, output: Cacheable, - ): + def __init__(self, output: Cacheable): """Initializes a `PrecomputedValueProvider`. 
Args: @@ -37,7 +35,7 @@ def create( execution_context: ExecutionContext, ) -> PrecomputedValueProvider: """Creates instance (see parent class for full docstring).""" - return cls(output=config["output"],) + return cls(output=config["output"]) def get_value(self) -> Cacheable: """Returns the precomputed output.""" diff --git a/rasa/engine/training/graph_trainer.py b/rasa/engine/training/graph_trainer.py index 2875bc04aef5..cf2069264c8b 100644 --- a/rasa/engine/training/graph_trainer.py +++ b/rasa/engine/training/graph_trainer.py @@ -7,10 +7,7 @@ from rasa.engine.graph import ExecutionContext, GraphSchema, GraphModelConfiguration from rasa.engine.constants import PLACEHOLDER_IMPORTER from rasa.engine.runner.interface import GraphRunner -from rasa.engine.storage.storage import ( - ModelStorage, - ModelMetadata, -) +from rasa.engine.storage.storage import ModelStorage, ModelMetadata from rasa.engine.training.components import ( PrecomputedValueProvider, FingerprintComponent, @@ -108,7 +105,7 @@ def train( graph_runner.run(inputs={PLACEHOLDER_IMPORTER: importer}) return self._model_storage.create_model_package( - output_filename, model_configuration, domain, + output_filename, model_configuration, domain ) def fingerprint( diff --git a/rasa/engine/validation.py b/rasa/engine/validation.py index 2a8185eeee69..fe843b2dfb96 100644 --- a/rasa/engine/validation.py +++ b/rasa/engine/validation.py @@ -101,9 +101,7 @@ def _validate( ) _validate_constructor(node, create_fn_params) - _validate_needs( - node, schema, create_fn_params, run_fn_params, - ) + _validate_needs(node, schema, create_fn_params, run_fn_params) _validate_required_components(schema) @@ -125,7 +123,7 @@ def _validate_prediction_targets( def _validate_target( - target_name: Text, target_type: Text, expected_type: Type, schema: GraphSchema, + target_name: Text, target_type: Text, expected_type: Type, schema: GraphSchema ) -> None: if target_name not in schema.nodes: raise GraphSchemaValidationException( @@ -195,7 +193,7 @@ def _validate_interface_usage(node: SchemaNode) -> None: ) -def _validate_supported_languages(language: Optional[Text], node: SchemaNode,) -> None: +def _validate_supported_languages(language: Optional[Text], node: SchemaNode) -> None: supported_languages = node.uses.supported_languages() not_supported_languages = node.uses.not_supported_languages() @@ -378,7 +376,7 @@ def _validate_types_of_reserved_keywords( def _validate_constructor( - node: SchemaNode, create_fn_params: Dict[Text, ParameterInfo], + node: SchemaNode, create_fn_params: Dict[Text, ParameterInfo] ) -> None: _validate_types_of_reserved_keywords(create_fn_params, node, node.constructor_name) @@ -497,11 +495,11 @@ def _validate_parent_return_type( ) -def _validate_required_components(schema: GraphSchema,) -> None: +def _validate_required_components(schema: GraphSchema) -> None: unmet_requirements: Dict[Type, Set[Text]] = dict() for target_name in schema.target_names: unmet_requirements_for_target, _ = _recursively_check_required_components( - node_name=target_name, schema=schema, + node_name=target_name, schema=schema ) for component_type, node_names in unmet_requirements_for_target.items(): unmet_requirements.setdefault(component_type, set()).update(node_names) @@ -529,7 +527,7 @@ def _validate_required_components(schema: GraphSchema,) -> None: def _recursively_check_required_components( - node_name: Text, schema: GraphSchema, + node_name: Text, schema: GraphSchema ) -> Tuple[Dict[Type, Set[Text]], Set[Type]]: """Collects unmet requirements and types 
used in the subtree rooted at `node_name`. diff --git a/rasa/graph_components/providers/domain_for_core_training_provider.py b/rasa/graph_components/providers/domain_for_core_training_provider.py index d9390d6cffb8..ae1455de277f 100644 --- a/rasa/graph_components/providers/domain_for_core_training_provider.py +++ b/rasa/graph_components/providers/domain_for_core_training_provider.py @@ -7,12 +7,7 @@ from rasa.engine.storage.resource import Resource from rasa.engine.storage.storage import ModelStorage from rasa.shared.constants import REQUIRED_SLOTS_KEY -from rasa.shared.core.domain import ( - KEY_RESPONSES, - Domain, - SESSION_CONFIG_KEY, - KEY_FORMS, -) +from rasa.shared.core.domain import KEY_RESPONSES, Domain, SESSION_CONFIG_KEY, KEY_FORMS class DomainForCoreTrainingProvider(GraphComponent): diff --git a/rasa/graph_components/providers/nlu_training_data_provider.py b/rasa/graph_components/providers/nlu_training_data_provider.py index 188953f36564..55aa889ade5d 100644 --- a/rasa/graph_components/providers/nlu_training_data_provider.py +++ b/rasa/graph_components/providers/nlu_training_data_provider.py @@ -14,7 +14,7 @@ class NLUTrainingDataProvider(GraphComponent): """Provides NLU training data during training.""" def __init__( - self, config: Dict[Text, Any], model_storage: ModelStorage, resource: Resource, + self, config: Dict[Text, Any], model_storage: ModelStorage, resource: Resource ) -> None: """Creates a new NLU training data provider.""" self._config = config @@ -45,7 +45,7 @@ def _persist(self, training_data: TrainingData) -> None: filename=DEFAULT_TRAINING_DATA_OUTPUT_PATH, ) - def provide(self, importer: TrainingDataImporter,) -> TrainingData: + def provide(self, importer: TrainingDataImporter) -> TrainingData: """Provides nlu training data during training.""" if "language" in self._config: training_data = importer.get_nlu_data(language=self._config["language"]) diff --git a/rasa/graph_components/providers/story_graph_provider.py b/rasa/graph_components/providers/story_graph_provider.py index a915f2507967..bb4635191d0f 100644 --- a/rasa/graph_components/providers/story_graph_provider.py +++ b/rasa/graph_components/providers/story_graph_provider.py @@ -18,9 +18,7 @@ def __init__(self, config: Dict[Text, Any]) -> None: @staticmethod def get_default_config() -> Dict[Text, Any]: """Returns default configuration (see parent class for full docstring).""" - return { - "exclusion_percentage": None, - } + return {"exclusion_percentage": None} @classmethod def create( @@ -33,7 +31,7 @@ def create( """Creates component (see parent class for full docstring).""" return cls(config) - def provide(self, importer: TrainingDataImporter,) -> StoryGraph: + def provide(self, importer: TrainingDataImporter) -> StoryGraph: """Provides the story graph from the training data. Args: diff --git a/rasa/graph_components/validators/default_recipe_validator.py b/rasa/graph_components/validators/default_recipe_validator.py index ef175711889b..6298f13d528e 100644 --- a/rasa/graph_components/validators/default_recipe_validator.py +++ b/rasa/graph_components/validators/default_recipe_validator.py @@ -44,17 +44,9 @@ # TODO: Can we replace this with the registered types from the registry? 
-TRAINABLE_EXTRACTORS = [ - MitieEntityExtractor, - CRFEntityExtractor, - DIETClassifier, -] +TRAINABLE_EXTRACTORS = [MitieEntityExtractor, CRFEntityExtractor, DIETClassifier] # TODO: replace these once the Recipe is merged (used in tests) -POLICY_CLASSSES = { - TEDPolicy, - MemoizationPolicy, - RulePolicy, -} +POLICY_CLASSSES = {TEDPolicy, MemoizationPolicy, RulePolicy} def _types_to_str(types: Iterable[Type]) -> Text: @@ -180,7 +172,7 @@ def _warn_if_some_training_data_is_unused( ) if training_data.regex_features and self._component_types.isdisjoint( - [RegexFeaturizer, RegexEntityExtractor], + [RegexFeaturizer, RegexEntityExtractor] ): rasa.shared.utils.io.raise_warning( f"You have defined training data with regexes, but " @@ -194,7 +186,7 @@ def _warn_if_some_training_data_is_unused( ) if training_data.lookup_tables and self._component_types.isdisjoint( - [RegexFeaturizer, RegexEntityExtractor], + [RegexFeaturizer, RegexEntityExtractor] ): rasa.shared.utils.io.raise_warning( f"You have defined training data consisting of lookup tables, but " @@ -354,7 +346,7 @@ def _warn_of_competition_with_regex_extractor( docs=f"{DOCS_URL_COMPONENTS}#regexentityextractor", ) - def _raise_if_featurizers_are_not_compatible(self,) -> None: + def _raise_if_featurizers_are_not_compatible(self) -> None: """Raises or warns if there are problems regarding the featurizers. Raises: @@ -408,7 +400,7 @@ def _warn_if_no_rule_policy_is_contained(self) -> None: ) def _raise_if_domain_contains_form_names_but_no_rule_policy_given( - self, domain: Domain, + self, domain: Domain ) -> None: """Validates that there exists a rule policy if forms are defined. @@ -430,7 +422,7 @@ def _raise_if_domain_contains_form_names_but_no_rule_policy_given( ) def _raise_if_a_rule_policy_is_incompatible_with_domain( - self, domain: Domain, + self, domain: Domain ) -> None: """Validates the rule policies against the domain. diff --git a/rasa/graph_components/validators/finetuning_validator.py b/rasa/graph_components/validators/finetuning_validator.py index ee479145460b..c747789894aa 100644 --- a/rasa/graph_components/validators/finetuning_validator.py +++ b/rasa/graph_components/validators/finetuning_validator.py @@ -83,7 +83,7 @@ def __init__( self._core = config["validate_core"] self._nlu = config["validate_nlu"] - def validate(self, importer: TrainingDataImporter,) -> TrainingDataImporter: + def validate(self, importer: TrainingDataImporter) -> TrainingDataImporter: """Validates whether we can finetune Core and NLU when finetuning is enabled. Args: @@ -187,7 +187,7 @@ def _validate(self, importer: TrainingDataImporter) -> None: self.persist() def _compare_or_memorize( - self, fingerprint_key: Text, new_fingerprint: Text, error_message: Text, + self, fingerprint_key: Text, new_fingerprint: Text, error_message: Text ) -> None: """Compares given fingerprint if we are finetuning, otherwise just saves it. @@ -222,7 +222,7 @@ def _get_fingerprint_of_domain_pruned_for_core(domain: Domain) -> Text: pruned_domain = DomainForCoreTrainingProvider.create_pruned_version(domain) return pruned_domain.fingerprint() - def _get_fingerprint_of_schema_without_irrelevant_keys(self,) -> Text: + def _get_fingerprint_of_schema_without_irrelevant_keys(self) -> Text: """Returns a fingerprint of the given schema with certain items removed. 
These items include specifications that do not influence actual training @@ -284,7 +284,7 @@ def load( try: with model_storage.read_from(resource) as path: fingerprints = rasa.shared.utils.io.read_json_file( - filename=path / cls.FILENAME, + filename=path / cls.FILENAME ) return cls( config=config, diff --git a/rasa/model.py b/rasa/model.py index c7ed81abfd98..6ccd32ac8857 100644 --- a/rasa/model.py +++ b/rasa/model.py @@ -4,11 +4,7 @@ import os from pathlib import Path from subprocess import check_output, DEVNULL, CalledProcessError -from typing import ( - Text, - Optional, - Union, -) +from typing import Text, Optional, Union from rasa.shared.constants import DEFAULT_MODELS_PATH diff --git a/rasa/model_testing.py b/rasa/model_testing.py index e6e5341f9422..f45968d5e7a7 100644 --- a/rasa/model_testing.py +++ b/rasa/model_testing.py @@ -117,10 +117,7 @@ async def test_core_models( from rasa.core.test import compare_models await compare_models( - models, - stories, - output, - use_conversation_test_files=use_conversation_test_files, + models, stories, output, use_conversation_test_files=use_conversation_test_files ) @@ -177,11 +174,7 @@ async def test_core( ) await core_test( - stories, - _agent, - e2e=use_conversation_test_files, - out_directory=output, - **kwargs, + stories, _agent, e2e=use_conversation_test_files, out_directory=output, **kwargs ) diff --git a/rasa/model_training.py b/rasa/model_training.py index 71650fe7d0be..922cf6f2135d 100644 --- a/rasa/model_training.py +++ b/rasa/model_training.py @@ -1,15 +1,7 @@ import tempfile import time from pathlib import Path -from typing import ( - Text, - NamedTuple, - Optional, - List, - Union, - Dict, - Any, -) +from typing import Text, NamedTuple, Optional, List, Union, Dict, Any import randomname @@ -164,9 +156,7 @@ def train( ) training_type = TrainingType.CORE - with telemetry.track_model_training( - file_importer, model_type="rasa", - ): + with telemetry.track_model_training(file_importer, model_type="rasa"): return _train_graph( file_importer, training_type=training_type, @@ -232,7 +222,7 @@ def _train_graph( full_model_path = Path(output_path, model_name) with telemetry.track_model_training( - file_importer, model_type=training_type.model_type, + file_importer, model_type=training_type.model_type ): trainer.train( model_configuration, diff --git a/rasa/nlu/classifiers/diet_classifier.py b/rasa/nlu/classifiers/diet_classifier.py index bf5016ecf2ca..9eb96dc6d98f 100644 --- a/rasa/nlu/classifiers/diet_classifier.py +++ b/rasa/nlu/classifiers/diet_classifier.py @@ -968,7 +968,7 @@ def _predict_label( ranking = [(idx, casted_message_sim[idx]) for idx in ranked_label_indices] label_ranking = [ - {"name": self.index_label_id_mapping[label_idx], "confidence": score,} + {"name": self.index_label_id_mapping[label_idx], "confidence": score} for label_idx, score in ranking ] @@ -1726,7 +1726,7 @@ def batch_predict( tf_batch_data, TEXT ) sentence_feature_lengths = self._get_sentence_feature_lengths( - tf_batch_data, TEXT, + tf_batch_data, TEXT ) text_transformed, _, _, _, _, attention_weights = self._tf_layers[ diff --git a/rasa/nlu/classifiers/fallback_classifier.py b/rasa/nlu/classifiers/fallback_classifier.py index 4e1e56efd43a..05d1dbc43a00 100644 --- a/rasa/nlu/classifiers/fallback_classifier.py +++ b/rasa/nlu/classifiers/fallback_classifier.py @@ -52,7 +52,7 @@ def get_default_config() -> Dict[Text, Any]: AMBIGUITY_THRESHOLD_KEY: DEFAULT_NLU_FALLBACK_AMBIGUITY_THRESHOLD, } - def __init__(self, config: Dict[Text, Any],) -> None: + def 
__init__(self, config: Dict[Text, Any]) -> None: """Constructs a new fallback classifier.""" self.component_config = config diff --git a/rasa/nlu/classifiers/keyword_intent_classifier.py b/rasa/nlu/classifiers/keyword_intent_classifier.py index 70506b037881..43d48f3ddd1b 100644 --- a/rasa/nlu/classifiers/keyword_intent_classifier.py +++ b/rasa/nlu/classifiers/keyword_intent_classifier.py @@ -60,7 +60,7 @@ def create( """Creates a new untrained component (see parent class for full docstring).""" return cls(config, model_storage, resource, execution_context) - def train(self, training_data: TrainingData,) -> Resource: + def train(self, training_data: TrainingData) -> Resource: """Trains the intent classifier on a data set.""" duplicate_examples = set() for ex in training_data.intent_examples: @@ -176,5 +176,5 @@ def load( intent_keyword_map = None return cls( - config, model_storage, resource, execution_context, intent_keyword_map, + config, model_storage, resource, execution_context, intent_keyword_map ) diff --git a/rasa/nlu/classifiers/sklearn_intent_classifier.py b/rasa/nlu/classifiers/sklearn_intent_classifier.py index e0a1516a5f06..771d0c1046f2 100644 --- a/rasa/nlu/classifiers/sklearn_intent_classifier.py +++ b/rasa/nlu/classifiers/sklearn_intent_classifier.py @@ -285,7 +285,7 @@ def load( encoder = LabelEncoder() encoder.classes_ = classes - return cls(config, model_storage, resource, classifier, encoder,) + return cls(config, model_storage, resource, classifier, encoder) except ValueError: logger.debug( f"Failed to load '{cls.__name__}' from model storage. Resource " diff --git a/rasa/nlu/constants.py b/rasa/nlu/constants.py index 6ffad59a82cd..c62eeb0dc40c 100644 --- a/rasa/nlu/constants.py +++ b/rasa/nlu/constants.py @@ -17,10 +17,7 @@ EXTRACTOR = "extractor" -PRETRAINED_EXTRACTORS = { - "DucklingEntityExtractor", - "SpacyEntityExtractor", -} +PRETRAINED_EXTRACTORS = {"DucklingEntityExtractor", "SpacyEntityExtractor"} NUMBER_OF_SUB_TOKENS = "number_of_sub_tokens" diff --git a/rasa/nlu/emulators/dialogflow.py b/rasa/nlu/emulators/dialogflow.py index 89d4a4647735..6e7665bb8858 100644 --- a/rasa/nlu/emulators/dialogflow.py +++ b/rasa/nlu/emulators/dialogflow.py @@ -23,7 +23,7 @@ class DialogflowEmulator(Emulator): """ def normalise_response_json(self, data: Dict[Text, Any]) -> Dict[Text, Any]: - """"Transform response JSON to DialogFlow format. + """Transform response JSON to DialogFlow format. Args: data: input JSON data as a dictionary. 
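Note on the DialogflowEmulator hunk above: this one is a genuine bug fix rather than pure formatting. The old docstring opened with four quote characters, so Python parsed the first three as the `"""` delimiter and carried the fourth into the docstring itself. A minimal standalone sketch (plain Python, not part of the patch) showing the effect:

def buggy() -> None:
    """"Transform response JSON to DialogFlow format."""


def fixed() -> None:
    """Transform response JSON to DialogFlow format."""


# The four-quote opener tokenizes as the """ delimiter plus a literal ",
# so a stray quote leaks into the rendered documentation.
print(repr(buggy.__doc__))  # '"Transform response JSON to DialogFlow format.'
print(repr(fixed.__doc__))  # 'Transform response JSON to DialogFlow format.'
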
diff --git a/rasa/nlu/extractors/entity_synonyms.py b/rasa/nlu/extractors/entity_synonyms.py index 3f6ffb676720..7af0efa1c472 100644 --- a/rasa/nlu/extractors/entity_synonyms.py +++ b/rasa/nlu/extractors/entity_synonyms.py @@ -62,7 +62,7 @@ def create( """Creates component (see parent class for full docstring).""" return cls(config, model_storage, resource, synonyms) - def train(self, training_data: TrainingData,) -> Resource: + def train(self, training_data: TrainingData) -> Resource: """Trains the synonym lookup table.""" for key, value in list(training_data.entity_synonyms.items()): self._add_entities_if_synonyms(key, value) diff --git a/rasa/nlu/extractors/mitie_entity_extractor.py b/rasa/nlu/extractors/mitie_entity_extractor.py index 3d855e8561b5..8472fdb4005b 100644 --- a/rasa/nlu/extractors/mitie_entity_extractor.py +++ b/rasa/nlu/extractors/mitie_entity_extractor.py @@ -54,9 +54,7 @@ def required_packages() -> List[Text]: @staticmethod def get_default_config() -> Dict[Text, Any]: """The component's default config (see parent class for full docstring).""" - return { - "num_threads": 1, - } + return {"num_threads": 1} def __init__( self, @@ -191,7 +189,7 @@ def _prepare_mitie_sample(training_example: Message) -> Any: continue return sample - def process(self, messages: List[Message], model: MitieModel,) -> List[Message]: + def process(self, messages: List[Message], model: MitieModel) -> List[Message]: """Extracts entities from messages and appends them to the attribute. If no patterns where found during training, then the given messages will not @@ -217,7 +215,7 @@ def process(self, messages: List[Message], model: MitieModel,) -> List[Message]: return messages def _extract_entities( - self, message: Message, mitie_model: MitieModel, + self, message: Message, mitie_model: MitieModel ) -> List[Dict[Text, Any]]: """Extract entities of the given type from the given user message. @@ -234,7 +232,7 @@ def _extract_entities( entities = [] token_texts = [token.text for token in tokens] mitie_entities = self._ner.extract_entities( - token_texts, mitie_model.word_feature_extractor, + token_texts, mitie_model.word_feature_extractor ) for e in mitie_entities: if len(e[0]): diff --git a/rasa/nlu/extractors/regex_entity_extractor.py b/rasa/nlu/extractors/regex_entity_extractor.py index 228186ccc46c..cd4568a3e4ca 100644 --- a/rasa/nlu/extractors/regex_entity_extractor.py +++ b/rasa/nlu/extractors/regex_entity_extractor.py @@ -210,7 +210,7 @@ def load( f"could not be extracted from the given training data - and hence " f"could not be persisted." 
) - return cls(config, model_storage=model_storage, resource=resource,) + return cls(config, model_storage=model_storage, resource=resource) def persist(self) -> None: """Persist this model.""" diff --git a/rasa/nlu/featurizers/dense_featurizer/convert_featurizer.py b/rasa/nlu/featurizers/dense_featurizer/convert_featurizer.py index f688d7556da7..57c5a123d0ce 100644 --- a/rasa/nlu/featurizers/dense_featurizer/convert_featurizer.py +++ b/rasa/nlu/featurizers/dense_featurizer/convert_featurizer.py @@ -23,10 +23,7 @@ TOKENS_NAMES, NUMBER_OF_SUB_TOKENS, ) -from rasa.shared.nlu.constants import ( - TEXT, - ACTION_TEXT, -) +from rasa.shared.nlu.constants import TEXT, ACTION_TEXT from rasa.exceptions import RasaException import rasa.nlu.utils import rasa.utils.train_utils as train_utils @@ -337,7 +334,7 @@ def _sequence_encoding_of_text(self, batch: List[Text]) -> np.ndarray: "sequence_encoding" ].numpy() - def process_training_data(self, training_data: TrainingData,) -> TrainingData: + def process_training_data(self, training_data: TrainingData) -> TrainingData: """Featurize all message attributes in the training data with the ConveRT model. Args: diff --git a/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py b/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py index f795a6e4001e..5533bec91b7a 100644 --- a/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py +++ b/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py @@ -20,10 +20,7 @@ NUMBER_OF_SUB_TOKENS, TOKENS_NAMES, ) -from rasa.shared.nlu.constants import ( - TEXT, - ACTION_TEXT, -) +from rasa.shared.nlu.constants import TEXT, ACTION_TEXT from rasa.utils import train_utils logger = logging.getLogger(__name__) @@ -57,7 +54,7 @@ def required_components(cls) -> List[Type]: return [Tokenizer] def __init__( - self, config: Dict[Text, Any], execution_context: ExecutionContext, + self, config: Dict[Text, Any], execution_context: ExecutionContext ) -> None: """Initializes the featurizer with the model in the config.""" super(LanguageModelFeaturizer, self).__init__( @@ -701,7 +698,7 @@ def _get_docs_for_batch( return batch_docs - def process_training_data(self, training_data: TrainingData,) -> TrainingData: + def process_training_data(self, training_data: TrainingData) -> TrainingData: """Computes tokens and dense features for each message in training data. 
Args: diff --git a/rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py b/rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py index 40455dd0c2f3..b94b9364e36c 100644 --- a/rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py +++ b/rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py @@ -57,7 +57,7 @@ def required_packages() -> List[Text]: return ["mitie", "numpy"] def __init__( - self, config: Dict[Text, Any], execution_context: ExecutionContext, + self, config: Dict[Text, Any], execution_context: ExecutionContext ) -> None: """Instantiates a new `MitieFeaturizer` instance.""" super().__init__(execution_context.node_name, config) diff --git a/rasa/nlu/featurizers/dense_featurizer/spacy_featurizer.py b/rasa/nlu/featurizers/dense_featurizer/spacy_featurizer.py index fdd0d1c03be5..206ed07743e3 100644 --- a/rasa/nlu/featurizers/dense_featurizer/spacy_featurizer.py +++ b/rasa/nlu/featurizers/dense_featurizer/spacy_featurizer.py @@ -47,7 +47,7 @@ def get_default_config() -> Dict[Text, Any]: POOLING: MEAN_POOLING, } - def __init__(self, config: Dict[Text, Any], name: Text,) -> None: + def __init__(self, config: Dict[Text, Any], name: Text) -> None: """Initializes SpacyFeaturizer.""" super().__init__(name, config) self.pooling_operation = self._config[POOLING] diff --git a/rasa/nlu/featurizers/featurizer.py b/rasa/nlu/featurizers/featurizer.py index 2325f65686b9..41559b3162f2 100644 --- a/rasa/nlu/featurizers/featurizer.py +++ b/rasa/nlu/featurizers/featurizer.py @@ -59,7 +59,7 @@ def add_features_to_message( (FEATURE_TYPE_SENTENCE, sentence), ]: if features is not None: - wrapped_feature = Features(features, type, attribute, self._identifier,) + wrapped_feature = Features(features, type, attribute, self._identifier) message.add_features(wrapped_feature) @staticmethod diff --git a/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py b/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py index b342c04b0029..945851d31c85 100644 --- a/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py +++ b/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py @@ -23,12 +23,7 @@ MESSAGE_ATTRIBUTES, DENSE_FEATURIZABLE_ATTRIBUTES, ) -from rasa.shared.nlu.constants import ( - TEXT, - INTENT, - INTENT_RESPONSE_KEY, - ACTION_NAME, -) +from rasa.shared.nlu.constants import TEXT, INTENT, INTENT_RESPONSE_KEY, ACTION_NAME BUFFER_SLOTS_PREFIX = "buf_" @@ -605,7 +600,7 @@ def _get_featurized_attribute( return [], [] def train( - self, training_data: TrainingData, model: Optional[SpacyModel] = None, + self, training_data: TrainingData, model: Optional[SpacyModel] = None ) -> Resource: """Trains the featurizer. 
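Note: nearly every hunk in this patch is the same mechanical edit, deleting the trailing comma inside a call, signature, or literal so the whole expression can sit on one line. This is consistent with Black's magic-trailing-comma behavior, where a pre-existing trailing comma pins the exploded one-item-per-line layout and removing it lets the formatter collapse anything that fits within the line length. A small illustration (the `describe` helper is invented for this sketch):

def describe(name: str, version: str, optional: bool) -> str:
    return f"{name}-{version} (optional={optional})"


# A trailing comma after the last argument keeps the call exploded,
# one argument per line, even though it would fit on a single line:
summary = describe(
    "tokenizer",
    "1.0",
    False,
)

# Dropping the comma lets the formatter collapse the call, which is the
# one-line shape these hunks move every touched call site towards:
summary = describe("tokenizer", "1.0", False)
print(summary)
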
diff --git a/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py b/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py index 2c3d5e91f107..205946cb37f8 100644 --- a/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py +++ b/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py @@ -4,27 +4,13 @@ import scipy.sparse import numpy as np -from typing import ( - Any, - Dict, - Text, - List, - Tuple, - Callable, - Set, - Optional, - Type, - Union, -) +from typing import Any, Dict, Text, List, Tuple, Callable, Set, Optional, Type, Union from rasa.engine.graph import ExecutionContext, GraphComponent from rasa.engine.recipes.default_recipe import DefaultV1Recipe from rasa.engine.storage.resource import Resource from rasa.engine.storage.storage import ModelStorage -from rasa.nlu.tokenizers.spacy_tokenizer import ( - POS_TAG_KEY, - SpacyTokenizer, -) +from rasa.nlu.tokenizers.spacy_tokenizer import POS_TAG_KEY, SpacyTokenizer from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer from rasa.nlu.featurizers.sparse_featurizer.sparse_featurizer import SparseFeaturizer from rasa.nlu.constants import TOKENS_NAMES @@ -102,7 +88,7 @@ class LexicalSyntacticFeaturizer(SparseFeaturizer, GraphComponent): @classmethod def _extract_raw_features_from_token( - cls, feature_name: Text, token: Token, token_position: int, num_tokens: int, + cls, feature_name: Text, token: Token, token_position: int, num_tokens: int ) -> Text: """Extracts a raw feature from the token at the given position. diff --git a/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py b/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py index 1219d98227d5..f7773cc1a1b0 100644 --- a/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py +++ b/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py @@ -15,11 +15,7 @@ from rasa.engine.storage.storage import ModelStorage from rasa.nlu.constants import TOKENS_NAMES from rasa.nlu.featurizers.sparse_featurizer.sparse_featurizer import SparseFeaturizer -from rasa.shared.nlu.constants import ( - TEXT, - RESPONSE, - ACTION_TEXT, -) +from rasa.shared.nlu.constants import TEXT, RESPONSE, ACTION_TEXT from rasa.shared.nlu.training_data.training_data import TrainingData from rasa.shared.nlu.training_data.message import Message @@ -116,7 +112,7 @@ def _merge_new_patterns(self, new_patterns: List[Dict[Text, Text]]) -> None: else: self.known_patterns.append(extra_pattern) - def train(self, training_data: TrainingData,) -> Resource: + def train(self, training_data: TrainingData) -> Resource: """Trains the component with all patterns extracted from training data.""" patterns_from_data = pattern_utils.extract_patterns( training_data, diff --git a/rasa/nlu/selectors/response_selector.py b/rasa/nlu/selectors/response_selector.py index c5c925ef99fa..a68a7278792a 100644 --- a/rasa/nlu/selectors/response_selector.py +++ b/rasa/nlu/selectors/response_selector.py @@ -831,7 +831,7 @@ def _create_all_labels(self) -> Tuple[tf.Tensor, tf.Tensor]: # effective sequence length of these features being 1 or 0. # We need to combine the two lengths to correctly get the last position. 
sentence_feature_lengths = self._get_sentence_feature_lengths( - self.tf_label_data, LABEL, + self.tf_label_data, LABEL ) sentence_label = self._last_token( label_transformed, sequence_feature_lengths + sentence_feature_lengths diff --git a/rasa/nlu/test.py b/rasa/nlu/test.py index b5a4c750bce4..bdec824a1349 100644 --- a/rasa/nlu/test.py +++ b/rasa/nlu/test.py @@ -593,7 +593,7 @@ def evaluate_intents( ) report, precision, f1, accuracy, confusion_matrix, labels = _calculate_report( - output_directory, target_intents, predicted_intents, report_as_dict, + output_directory, target_intents, predicted_intents, report_as_dict ) if output_directory: _dump_report(output_directory, "intent_report.json", report) @@ -670,7 +670,7 @@ def _calculate_report( report_as_dict = bool(output_directory) report, precision, f1, accuracy = get_evaluation_metrics( - targets, predictions, output_dict=report_as_dict, exclude_label=exclude_label, + targets, predictions, output_dict=report_as_dict, exclude_label=exclude_label ) if report_as_dict: @@ -1382,7 +1382,7 @@ async def run_evaluation( from rasa.shared.constants import DEFAULT_DOMAIN_PATH test_data_importer = TrainingDataImporter.load_from_dict( - training_data_paths=[data_path], domain_path=DEFAULT_DOMAIN_PATH, + training_data_paths=[data_path], domain_path=DEFAULT_DOMAIN_PATH ) test_data = test_data_importer.get_nlu_data() diff --git a/rasa/nlu/tokenizers/jieba_tokenizer.py b/rasa/nlu/tokenizers/jieba_tokenizer.py index 8bf2a144069c..352bca79a46b 100644 --- a/rasa/nlu/tokenizers/jieba_tokenizer.py +++ b/rasa/nlu/tokenizers/jieba_tokenizer.py @@ -44,7 +44,7 @@ def get_default_config() -> Dict[Text, Any]: } def __init__( - self, config: Dict[Text, Any], model_storage: ModelStorage, resource: Resource, + self, config: Dict[Text, Any], model_storage: ModelStorage, resource: Resource ) -> None: """Initialize the tokenizer.""" super().__init__(config) diff --git a/rasa/nlu/utils/spacy_utils.py b/rasa/nlu/utils/spacy_utils.py index dbaa67c44e63..fddd2947c261 100644 --- a/rasa/nlu/utils/spacy_utils.py +++ b/rasa/nlu/utils/spacy_utils.py @@ -71,7 +71,7 @@ def get_default_config() -> Dict[Text, Any]: # retrieve the same vector, if set to `False`. For some # applications and models it makes sense to differentiate # between these two words, therefore setting this to `True`. 
- "case_sensitive": False, + "case_sensitive": False } @staticmethod diff --git a/rasa/server.py b/rasa/server.py index ba0b00514b9b..d3594efb694d 100644 --- a/rasa/server.py +++ b/rasa/server.py @@ -1462,13 +1462,9 @@ def _validate_yaml_training_payload(yaml_text: Text) -> None: def _extract_core_additional_arguments(request: Request) -> Dict[Text, Any]: return { - "augmentation_factor": rasa.utils.endpoints.int_arg( - request, "augmentation", 50 - ), + "augmentation_factor": rasa.utils.endpoints.int_arg(request, "augmentation", 50) } def _extract_nlu_additional_arguments(request: Request) -> Dict[Text, Any]: - return { - "num_threads": rasa.utils.endpoints.int_arg(request, "num_threads", 1), - } + return {"num_threads": rasa.utils.endpoints.int_arg(request, "num_threads", 1)} diff --git a/rasa/shared/core/domain.py b/rasa/shared/core/domain.py index e49d4d531c0b..249c72ac32cb 100644 --- a/rasa/shared/core/domain.py +++ b/rasa/shared/core/domain.py @@ -31,11 +31,7 @@ IGNORED_INTENTS, ) import rasa.shared.core.constants -from rasa.shared.core.constants import ( - SlotMappingType, - MAPPING_TYPE, - MAPPING_CONDITIONS, -) +from rasa.shared.core.constants import SlotMappingType, MAPPING_TYPE, MAPPING_CONDITIONS from rasa.shared.exceptions import RasaException, YamlException, YamlSyntaxException import rasa.shared.utils.validation import rasa.shared.utils.io @@ -233,7 +229,7 @@ def _get_session_config(session_config: Dict) -> SessionConfig: session_expiration_time_min = DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES carry_over_slots = session_config.get( - CARRY_OVER_SLOTS_KEY, DEFAULT_CARRY_OVER_SLOTS_TO_NEW_SESSION, + CARRY_OVER_SLOTS_KEY, DEFAULT_CARRY_OVER_SLOTS_TO_NEW_SESSION ) return SessionConfig(session_expiration_time_min, carry_over_slots) @@ -1012,7 +1008,7 @@ def _get_user_sub_state( @staticmethod def _get_slots_sub_state( - tracker: "DialogueStateTracker", omit_unset_slots: bool = False, + tracker: "DialogueStateTracker", omit_unset_slots: bool = False ) -> Dict[Text, Union[Text, Tuple[float]]]: """Sets all set slots with the featurization of the stored value. @@ -1056,16 +1052,14 @@ def _get_prev_action_sub_state( return tracker.latest_action @staticmethod - def _get_active_loop_sub_state( - tracker: "DialogueStateTracker", - ) -> Dict[Text, Text]: + def _get_active_loop_sub_state(tracker: "DialogueStateTracker") -> Dict[Text, Text]: """Turn tracker's active loop into a state name. + Args: tracker: dialog state tracker containing the dialog so far Returns: a dictionary mapping "name" to active loop name if present """ - # we don't use tracker.active_loop_name # because we need to keep should_not_be_set active_loop: Optional[Text] = tracker.active_loop.get( @@ -1085,7 +1079,7 @@ def _clean_state(state: State) -> State: } def get_active_state( - self, tracker: "DialogueStateTracker", omit_unset_slots: bool = False, + self, tracker: "DialogueStateTracker", omit_unset_slots: bool = False ) -> State: """Given a dialogue tracker, makes a representation of current dialogue state. 
@@ -1112,7 +1106,7 @@ def get_active_state( @staticmethod def _remove_rule_only_features( - state: State, rule_only_data: Optional[Dict[Text, Any]], + state: State, rule_only_data: Optional[Dict[Text, Any]] ) -> None: if not rule_only_data: return diff --git a/rasa/shared/core/events.py b/rasa/shared/core/events.py index bd4fad0dbc0c..cc5a7eb57fda 100644 --- a/rasa/shared/core/events.py +++ b/rasa/shared/core/events.py @@ -82,7 +82,7 @@ ) IntentPrediction = TypedDict( - "IntentPrediction", {INTENT_NAME_KEY: Text, PREDICTED_CONFIDENCE_KEY: float,}, + "IntentPrediction", {INTENT_NAME_KEY: Text, PREDICTED_CONFIDENCE_KEY: float} ) NLUPredictionData = TypedDict( "NLUPredictionData", @@ -553,7 +553,7 @@ def as_dict(self) -> Dict[Text, Any]: ) return _dict - def as_sub_state(self,) -> Dict[Text, Union[None, Text, List[Optional[Text]]]]: + def as_sub_state(self) -> Dict[Text, Union[None, Text, List[Optional[Text]]]]: """Turns a UserUttered event into features. The substate contains information about entities, intent and text of the @@ -1525,11 +1525,7 @@ def __init__( def __members__(self) -> Tuple[Optional[Text], Optional[Text], Text]: meta_no_nones = {k: v for k, v in self.metadata.items() if v is not None} - return ( - self.action_name, - self.action_text, - jsonpickle.encode(meta_no_nones), - ) + return (self.action_name, self.action_text, jsonpickle.encode(meta_no_nones)) def __repr__(self) -> Text: """Returns event as string for debugging.""" diff --git a/rasa/shared/core/generator.py b/rasa/shared/core/generator.py index 6c6120e83d62..3b6d0c4c02a1 100644 --- a/rasa/shared/core/generator.py +++ b/rasa/shared/core/generator.py @@ -85,7 +85,7 @@ def from_events( return tracker def past_states_for_hashing( - self, domain: Domain, omit_unset_slots: bool = False, + self, domain: Domain, omit_unset_slots: bool = False ) -> Deque[FrozenState]: """Generates and caches the past states of this tracker based on the history. @@ -190,8 +190,7 @@ def _append_current_state(self) -> None: self._states_for_hashing.append(frozen_state) def update(self, event: Event, skip_states: bool = False) -> None: - """Modify the state of the tracker according to an ``Event``. 
""" - + """Modify the state of the tracker according to an ``Event``.""" # if `skip_states` is `True`, this function behaves exactly like the # normal update of the `DialogueStateTracker` diff --git a/rasa/shared/core/slot_mappings.py b/rasa/shared/core/slot_mappings.py index 2c98563ee057..18eb65c6e3bf 100644 --- a/rasa/shared/core/slot_mappings.py +++ b/rasa/shared/core/slot_mappings.py @@ -74,7 +74,7 @@ def validate(mapping: Dict[Text, Any], slot_name: Text) -> None: @staticmethod def _get_active_loop_ignored_intents( - mapping: Dict[Text, Any], domain: "Domain", active_loop_name: Text, + mapping: Dict[Text, Any], domain: "Domain", active_loop_name: Text ) -> List[Text]: from rasa.shared.core.constants import ACTIVE_LOOP @@ -99,7 +99,7 @@ def _get_active_loop_ignored_intents( @staticmethod def intent_is_desired( - mapping: Dict[Text, Any], tracker: "DialogueStateTracker", domain: "Domain", + mapping: Dict[Text, Any], tracker: "DialogueStateTracker", domain: "Domain" ) -> bool: """Checks whether user intent matches slot mapping intent specifications.""" mapping_intents = SlotMapping.to_list(mapping.get(INTENT, [])) @@ -133,7 +133,7 @@ def to_list(x: Optional[Any]) -> List[Any]: @staticmethod def entity_is_desired( - mapping: Dict[Text, Any], tracker: "DialogueStateTracker", + mapping: Dict[Text, Any], tracker: "DialogueStateTracker" ) -> bool: """Checks whether slot should be filled by an entity in the input or not. diff --git a/rasa/shared/core/slots.py b/rasa/shared/core/slots.py index bc63ca25a3da..bb3496b3aaca 100644 --- a/rasa/shared/core/slots.py +++ b/rasa/shared/core/slots.py @@ -182,7 +182,7 @@ def __init__( UserWarning, if initial_value is outside the min-max range. """ super().__init__( - name, mappings, initial_value, value_reset_delay, influence_conversation, + name, mappings, initial_value, value_reset_delay, influence_conversation ) self.max_value = max_value self.min_value = min_value @@ -244,8 +244,7 @@ def _feature_dimensionality(self) -> int: def bool_from_any(x: Any) -> bool: - """ Converts bool/float/int/str to bool or raises error """ - + """Converts bool/float/int/str to bool or raises error.""" if isinstance(x, bool): return x elif isinstance(x, (float, int)): @@ -310,7 +309,7 @@ def __init__( ) -> None: """Creates a `Categorical Slot` (see parent class for detailed docstring).""" super().__init__( - name, mappings, initial_value, value_reset_delay, influence_conversation, + name, mappings, initial_value, value_reset_delay, influence_conversation ) if values and None in values: rasa.shared.utils.io.raise_warning( @@ -422,7 +421,7 @@ def __init__( ) super().__init__( - name, mappings, initial_value, value_reset_delay, influence_conversation, + name, mappings, initial_value, value_reset_delay, influence_conversation ) def __eq__(self, other: Any) -> bool: diff --git a/rasa/shared/core/trackers.py b/rasa/shared/core/trackers.py index 969f3b974157..f9982fb6505b 100644 --- a/rasa/shared/core/trackers.py +++ b/rasa/shared/core/trackers.py @@ -579,7 +579,7 @@ def _undo_till_previous_loop_execution( break if isinstance( - e, (ActionExecuted, UserUttered, DefinePrevUserUtteredFeaturization), + e, (ActionExecuted, UserUttered, DefinePrevUserUtteredFeaturization) ): del done_events[-1 - offset] else: @@ -640,7 +640,7 @@ def as_dialogue(self) -> Dialogue: return Dialogue(self.sender_id, list(self.events)) def update(self, event: Event, domain: Optional[Domain] = None) -> None: - """Modify the state of the tracker according to an ``Event``. 
""" + """Modify the state of the tracker according to an ``Event``.""" if not isinstance(event, Event): # pragma: no cover raise ValueError("event to log must be an instance of a subclass of Event.") diff --git a/rasa/shared/core/training_data/loading.py b/rasa/shared/core/training_data/loading.py index d3ddd397a2d1..2d748fb4041f 100644 --- a/rasa/shared/core/training_data/loading.py +++ b/rasa/shared/core/training_data/loading.py @@ -15,7 +15,7 @@ logger = logging.getLogger(__name__) -def _get_reader(filename: Text, domain: Domain,) -> StoryReader: +def _get_reader(filename: Text, domain: Domain) -> StoryReader: if rasa.shared.data.is_likely_yaml_file(filename): return YAMLStoryReader(domain, filename) else: @@ -24,7 +24,7 @@ def _get_reader(filename: Text, domain: Domain,) -> StoryReader: return _guess_reader(filename, domain) -def _guess_reader(filename: Text, domain: Domain,) -> StoryReader: +def _guess_reader(filename: Text, domain: Domain) -> StoryReader: if YAMLStoryReader.is_stories_file(filename): return YAMLStoryReader(domain, filename) @@ -36,7 +36,7 @@ def _guess_reader(filename: Text, domain: Domain,) -> StoryReader: def load_data_from_resource( - resource: Union[Text], domain: Domain, exclusion_percentage: Optional[int] = None, + resource: Union[Text], domain: Domain, exclusion_percentage: Optional[int] = None ) -> List["StoryStep"]: """Loads core training data from the specified folder. @@ -53,12 +53,12 @@ def load_data_from_resource( raise ValueError(f"Resource '{resource}' does not exist.") return load_data_from_files( - rasa.shared.utils.io.list_files(resource), domain, exclusion_percentage, + rasa.shared.utils.io.list_files(resource), domain, exclusion_percentage ) def load_data_from_files( - story_files: List[Text], domain: Domain, exclusion_percentage: Optional[int] = None, + story_files: List[Text], domain: Domain, exclusion_percentage: Optional[int] = None ) -> List["StoryStep"]: """Loads core training data from the specified files. diff --git a/rasa/shared/core/training_data/story_reader/story_reader.py b/rasa/shared/core/training_data/story_reader/story_reader.py index abb571a27f01..6d2a32785f49 100644 --- a/rasa/shared/core/training_data/story_reader/story_reader.py +++ b/rasa/shared/core/training_data/story_reader/story_reader.py @@ -17,7 +17,7 @@ class StoryReader: """Helper class to read a story file.""" def __init__( - self, domain: Optional[Domain] = None, source_name: Optional[Text] = None, + self, domain: Optional[Domain] = None, source_name: Optional[Text] = None ) -> None: """Constructor for the StoryReader. 
diff --git a/rasa/shared/core/training_data/story_reader/yaml_story_reader.py b/rasa/shared/core/training_data/story_reader/yaml_story_reader.py index 76e3bdc17ecc..4d21f157b1c9 100644 --- a/rasa/shared/core/training_data/story_reader/yaml_story_reader.py +++ b/rasa/shared/core/training_data/story_reader/yaml_story_reader.py @@ -656,7 +656,7 @@ def unpack_regex_message( PREDICTED_CONFIDENCE_KEY: confidence, } intent_ranking = [ - {INTENT_NAME_KEY: intent_name, PREDICTED_CONFIDENCE_KEY: confidence,} + {INTENT_NAME_KEY: intent_name, PREDICTED_CONFIDENCE_KEY: confidence} ] message_data = {} message_data[TEXT] = user_text diff --git a/rasa/shared/core/training_data/story_writer/yaml_story_writer.py b/rasa/shared/core/training_data/story_writer/yaml_story_writer.py index ae25a94d7a96..9bf31e0eb0be 100644 --- a/rasa/shared/core/training_data/story_writer/yaml_story_writer.py +++ b/rasa/shared/core/training_data/story_writer/yaml_story_writer.py @@ -1,13 +1,6 @@ from collections import OrderedDict from pathlib import Path -from typing import ( - Any, - Dict, - List, - Text, - Union, - Optional, -) +from typing import Any, Dict, List, Text, Union, Optional from ruamel import yaml from ruamel.yaml.comments import CommentedMap @@ -56,7 +49,7 @@ class YAMLStoryWriter(StoryWriter): - """Writes Core training data into a file in a YAML format. """ + """Writes Core training data into a file in a YAML format.""" def dumps( self, @@ -229,7 +222,7 @@ def process_user_utterance( [(entity["entity"], entity["value"])] ) entity_map.yaml_add_eol_comment( - commented_entity, entity["entity"], + commented_entity, entity["entity"] ) entities.append(entity_map) else: diff --git a/rasa/shared/core/training_data/structures.py b/rasa/shared/core/training_data/structures.py index 40461e6ba3bd..685b6f024d52 100644 --- a/rasa/shared/core/training_data/structures.py +++ b/rasa/shared/core/training_data/structures.py @@ -305,9 +305,9 @@ def __repr__(self) -> Text: class RuleStep(StoryStep): - """A Special type of StoryStep representing a Rule. """ + """A Special type of StoryStep representing a Rule.""" - def __init__( + def __init__( # noqa: D107 self, block_name: Optional[Text] = None, start_checkpoints: Optional[List[Checkpoint]] = None, @@ -351,8 +351,7 @@ def __repr__(self) -> Text: ) def get_rules_condition(self) -> List[Event]: - """Returns a list of events forming a condition of the Rule. """ - + """Returns a list of events forming a condition of the Rule.""" return [ event for event_id, event in enumerate(self.events) @@ -360,8 +359,7 @@ def get_rules_condition(self) -> List[Event]: ] def get_rules_events(self) -> List[Event]: - """Returns a list of events forming the Rule, that are not conditions. """ - + """Returns a list of events forming the Rule, that are not conditions.""" return [ event for event_id, event in enumerate(self.events) diff --git a/rasa/shared/core/training_data/visualization.py b/rasa/shared/core/training_data/visualization.py index 99dd98b21ba1..a237ac2f63a4 100644 --- a/rasa/shared/core/training_data/visualization.py +++ b/rasa/shared/core/training_data/visualization.py @@ -256,7 +256,7 @@ def _merge_equivalent_nodes(graph: "networkx.MultiDiGraph", max_history: int) -> def _replace_edge_labels_with_nodes( - graph: "networkx.MultiDiGraph", next_id: int, nlu_training_data: "TrainingData", + graph: "networkx.MultiDiGraph", next_id: int, nlu_training_data: "TrainingData" ) -> None: """Replaces edge labels with nodes. 
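Note: two further variants of the cleanup appear in the hunks above. Multi-line `from typing import (...)` blocks collapse to a single line once their magic trailing comma is gone, and `RuleStep.__init__` gains a `# noqa: D107` marker, which in pydocstyle terms silences the "missing docstring in __init__" check instead of adding a docstring. A compact sketch of both patterns (`RuleSketch` is invented for the example):

# Exploded import, kept multi-line by its trailing comma:
from typing import (
    Any,
    Dict,
)

# Collapsed form preferred throughout this patch:
from typing import Any, Dict


class RuleSketch:
    """Hypothetical class whose __init__ is exempted from docstring checks."""

    def __init__(self, name: str) -> None:  # noqa: D107
        self.name: str = name
        self.events: Dict[str, Any] = {}
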
diff --git a/rasa/shared/data.py b/rasa/shared/data.py index ffabb6923725..d665e9daef8e 100644 --- a/rasa/shared/data.py +++ b/rasa/shared/data.py @@ -56,7 +56,7 @@ def get_core_directory(paths: Optional[Union[Text, List[Text]]]) -> Text: return _copy_files_to_new_dir(core_files) -def get_nlu_directory(paths: Optional[Union[Text, List[Text]]],) -> Text: +def get_nlu_directory(paths: Optional[Union[Text, List[Text]]]) -> Text: """Recursively collects all NLU training files from a list of paths. Args: diff --git a/rasa/shared/importers/importer.py b/rasa/shared/importers/importer.py index 946ec0ff4e51..12e0a50e780f 100644 --- a/rasa/shared/importers/importer.py +++ b/rasa/shared/importers/importer.py @@ -29,7 +29,7 @@ def get_domain(self) -> Domain: """ raise NotImplementedError() - def get_stories(self, exclusion_percentage: Optional[int] = None,) -> StoryGraph: + def get_stories(self, exclusion_percentage: Optional[int] = None) -> StoryGraph: """Retrieves the stories that should be used for training. Args: @@ -204,7 +204,7 @@ def get_domain(self) -> Domain: """Retrieves model domain (see parent class for full docstring).""" return Domain.empty() - def get_stories(self, exclusion_percentage: Optional[int] = None,) -> StoryGraph: + def get_stories(self, exclusion_percentage: Optional[int] = None) -> StoryGraph: """Retrieves training stories / rules (see parent class for full docstring).""" return StoryGraph([]) @@ -248,7 +248,7 @@ def get_domain(self) -> Domain: ) @rasa.shared.utils.common.cached_method - def get_stories(self, exclusion_percentage: Optional[int] = None,) -> StoryGraph: + def get_stories(self, exclusion_percentage: Optional[int] = None) -> StoryGraph: """Retrieves training stories / rules (see parent class for full docstring).""" stories = [ importer.get_stories(exclusion_percentage) for importer in self._importers @@ -370,7 +370,7 @@ def _get_domain_with_retrieval_intents( {}, ) - def get_stories(self, exclusion_percentage: Optional[int] = None,) -> StoryGraph: + def get_stories(self, exclusion_percentage: Optional[int] = None) -> StoryGraph: """Retrieves training stories / rules (see parent class for full docstring).""" return self._importer.get_stories(exclusion_percentage) @@ -450,7 +450,7 @@ def _get_domain_with_e2e_actions(self) -> Domain: action_texts=additional_e2e_action_names, ) - def get_stories(self, exclusion_percentage: Optional[int] = None,) -> StoryGraph: + def get_stories(self, exclusion_percentage: Optional[int] = None) -> StoryGraph: """Retrieves the stories that should be used for training. See parent class for details. 
diff --git a/rasa/shared/importers/multi_project.py b/rasa/shared/importers/multi_project.py index 0ee50e9e82e9..7c87c7e8df83 100644 --- a/rasa/shared/importers/multi_project.py +++ b/rasa/shared/importers/multi_project.py @@ -178,10 +178,10 @@ def get_domain(self) -> Domain: lambda merged, other: merged.merge(other), domains, Domain.empty() ) - def get_stories(self, exclusion_percentage: Optional[int] = None,) -> StoryGraph: + def get_stories(self, exclusion_percentage: Optional[int] = None) -> StoryGraph: """Retrieves training stories / rules (see parent class for full docstring).""" return utils.story_graph_from_paths( - self._story_paths, self.get_domain(), exclusion_percentage, + self._story_paths, self.get_domain(), exclusion_percentage ) def get_conversation_tests(self) -> StoryGraph: diff --git a/rasa/shared/importers/rasa.py b/rasa/shared/importers/rasa.py index ae2ca838fa36..4e26b7d71b29 100644 --- a/rasa/shared/importers/rasa.py +++ b/rasa/shared/importers/rasa.py @@ -46,16 +46,16 @@ def get_config(self) -> Dict: """Retrieves model config (see parent class for full docstring).""" return self.config - def get_stories(self, exclusion_percentage: Optional[int] = None,) -> StoryGraph: + def get_stories(self, exclusion_percentage: Optional[int] = None) -> StoryGraph: """Retrieves training stories / rules (see parent class for full docstring).""" return utils.story_graph_from_paths( - self._story_files, self.get_domain(), exclusion_percentage, + self._story_files, self.get_domain(), exclusion_percentage ) def get_conversation_tests(self) -> StoryGraph: """Retrieves conversation test stories (see parent class for full docstring).""" return utils.story_graph_from_paths( - self._conversation_test_files, self.get_domain(), + self._conversation_test_files, self.get_domain() ) def get_nlu_data(self, language: Optional[Text] = "en") -> TrainingData: diff --git a/rasa/shared/importers/utils.py b/rasa/shared/importers/utils.py index cb6119b7eb51..40e5bb21e51b 100644 --- a/rasa/shared/importers/utils.py +++ b/rasa/shared/importers/utils.py @@ -13,7 +13,7 @@ def training_data_from_paths(paths: Iterable[Text], language: Text) -> TrainingD def story_graph_from_paths( - files: List[Text], domain: Domain, exclusion_percentage: Optional[int] = None, + files: List[Text], domain: Domain, exclusion_percentage: Optional[int] = None ) -> StoryGraph: """Returns the `StoryGraph` from paths.""" from rasa.shared.core.training_data import loading diff --git a/rasa/shared/nlu/constants.py b/rasa/shared/nlu/constants.py index 0c89d0d667ab..1f0b9b865c36 100644 --- a/rasa/shared/nlu/constants.py +++ b/rasa/shared/nlu/constants.py @@ -23,10 +23,7 @@ VALID_FEATURE_TYPES = [FEATURE_TYPE_SEQUENCE, FEATURE_TYPE_SENTENCE] EXTRACTOR = "extractor" -PRETRAINED_EXTRACTORS = { - "DucklingEntityExtractor", - "SpacyEntityExtractor", -} +PRETRAINED_EXTRACTORS = {"DucklingEntityExtractor", "SpacyEntityExtractor"} TRAINABLE_EXTRACTORS = {"MitieEntityExtractor", "CRFEntityExtractor", "DIETClassifier"} ENTITIES = "entities" diff --git a/rasa/shared/nlu/training_data/features.py b/rasa/shared/nlu/training_data/features.py index c2266e2b8779..273b3ad22631 100644 --- a/rasa/shared/nlu/training_data/features.py +++ b/rasa/shared/nlu/training_data/features.py @@ -205,7 +205,7 @@ def filter( @staticmethod def groupby_attribute( - features_list: List[Features], attributes: Optional[Iterable[Text]] = None, + features_list: List[Features], attributes: Optional[Iterable[Text]] = None ) -> Dict[Text, List[Features]]: """Groups the given 
features according to their attribute. @@ -236,7 +236,7 @@ def groupby_attribute( @staticmethod def combine( - features_list: List[Features], expected_origins: Optional[List[Text]] = None, + features_list: List[Features], expected_origins: Optional[List[Text]] = None ) -> Features: """Combine features of the same type and level that describe the same attribute. @@ -361,11 +361,11 @@ def reduce( # sequence feature that is (not) sparse before sentence feature that is # (not) sparse sublist = Features.filter( - features_list=features_list, type=type, is_sparse=is_sparse, + features_list=features_list, type=type, is_sparse=is_sparse ) if sublist: combined_feature = Features.combine( - sublist, expected_origins=expected_origins, + sublist, expected_origins=expected_origins ) output.append(combined_feature) return output diff --git a/rasa/shared/nlu/training_data/formats/dialogflow.py b/rasa/shared/nlu/training_data/formats/dialogflow.py index 76d8ad8cf78c..10c5c4d21dcc 100644 --- a/rasa/shared/nlu/training_data/formats/dialogflow.py +++ b/rasa/shared/nlu/training_data/formats/dialogflow.py @@ -130,10 +130,10 @@ def _read_entities( if entity["isRegexp"]: regex_features = DialogflowReader._extract_regex_features(entity, examples) - return TrainingData([], entity_synonyms, regex_features, [],) + return TrainingData([], entity_synonyms, regex_features, []) else: lookup_tables = DialogflowReader._extract_lookup_tables(entity, examples) - return TrainingData([], entity_synonyms, [], lookup_tables,) + return TrainingData([], entity_synonyms, [], lookup_tables) @staticmethod def _read_examples( diff --git a/rasa/shared/nlu/training_data/util.py b/rasa/shared/nlu/training_data/util.py index bf4978a9e3eb..56ff34b065ba 100644 --- a/rasa/shared/nlu/training_data/util.py +++ b/rasa/shared/nlu/training_data/util.py @@ -80,9 +80,7 @@ def get_file_format_extension(resource_name: Text) -> Text: if not file_formats: return rasa.shared.data.yaml_file_extension() - known_file_formats = { - loading.RASA_YAML: rasa.shared.data.yaml_file_extension(), - } + known_file_formats = {loading.RASA_YAML: rasa.shared.data.yaml_file_extension()} fformat = file_formats[0] if all(f == fformat for f in file_formats): return known_file_formats.get(fformat, rasa.shared.data.yaml_file_extension()) diff --git a/rasa/shared/utils/pykwalify_extensions.py b/rasa/shared/utils/pykwalify_extensions.py index 37874132a19d..5d998208b059 100644 --- a/rasa/shared/utils/pykwalify_extensions.py +++ b/rasa/shared/utils/pykwalify_extensions.py @@ -21,7 +21,7 @@ def require_response_keys( if response.get("text") is None and not response.get("custom"): return SchemaError( "Missing 'text' or 'custom' key in response or " - "null 'text' value in response.", + "null 'text' value in response." 
) return True diff --git a/rasa/shared/utils/schemas/events.py b/rasa/shared/utils/schemas/events.py index ba039a4eac94..ac9ebeef3f12 100644 --- a/rasa/shared/utils/schemas/events.py +++ b/rasa/shared/utils/schemas/events.py @@ -18,7 +18,7 @@ INTENT = { "type": "object", - "properties": {"name": {"type": "string"}, "confidence": {"type": "number"},}, + "properties": {"name": {"type": "string"}, "confidence": {"type": "number"}}, } RESPONSE_SCHEMA = { @@ -76,14 +76,14 @@ "response": RESPONSE_SCHEMA, "ranking": RANKING_SCHEMA, }, - }, - }, + } + } }, ], }, }, }, - }, + } } ACTION_EXECUTED = { @@ -98,11 +98,7 @@ } SLOT_SET = { - "properties": { - "event": {"const": "slot"}, - "name": {"type": "string"}, - "value": {}, - }, + "properties": {"event": {"const": "slot"}, "name": {"type": "string"}, "value": {}}, "required": ["name", "value"], } diff --git a/rasa/telemetry.py b/rasa/telemetry.py index 4a5377525ee1..57b9dd99b976 100644 --- a/rasa/telemetry.py +++ b/rasa/telemetry.py @@ -454,10 +454,11 @@ def _is_docker() -> bool: def with_default_context_fields( - context: Optional[Dict[Text, Any]] = None, + context: Optional[Dict[Text, Any]] = None ) -> Dict[Text, Any]: - """Return a new context dictionary that contains the default field values merged - with the provided ones. The default fields contain only the OS information for now. + """Return a new context dictionary with default and provided field values merged. + + The default fields contain only the OS information for now. Args: context: Context information about the event. @@ -1013,7 +1014,7 @@ def track_nlu_model_test(test_data: "TrainingData") -> None: @ensure_telemetry_enabled def track_markers_extraction_initiated( - strategy: Text, only_extract: bool, seed: bool, count: Optional[int], + strategy: Text, only_extract: bool, seed: bool, count: Optional[int] ) -> None: """Track when a user tries to extract success markers. @@ -1042,9 +1043,7 @@ def track_markers_extracted(trackers_count: int) -> None: Args: trackers_count: The actual number of trackers processed """ - _track( - TELEMETRY_MARKERS_EXTRACTED_EVENT, {"trackers_count": trackers_count}, - ) + _track(TELEMETRY_MARKERS_EXTRACTED_EVENT, {"trackers_count": trackers_count}) @ensure_telemetry_enabled @@ -1054,9 +1053,7 @@ def track_markers_stats_computed(trackers_count: int) -> None: Args: trackers_count: The actual number of trackers processed """ - _track( - TELEMETRY_MARKERS_STATS_COMPUTED_EVENT, {"trackers_count": trackers_count}, - ) + _track(TELEMETRY_MARKERS_STATS_COMPUTED_EVENT, {"trackers_count": trackers_count}) @ensure_telemetry_enabled diff --git a/rasa/utils/common.py b/rasa/utils/common.py index 9974a5e81cc5..9ffd1546086a 100644 --- a/rasa/utils/common.py +++ b/rasa/utils/common.py @@ -43,7 +43,7 @@ "Converting sparse IndexedSlices.* to a dense Tensor of unknown " "shape. 
This may consume a large amount of memory.", ), - (UserWarning, "Slot auto-fill has been removed in 3.0 .*",), + (UserWarning, "Slot auto-fill has been removed in 3.0 .*"), ] @@ -213,7 +213,7 @@ def update_sanic_log_level( ) socktype = SOCK_STREAM if syslog_protocol == TCP_PROTOCOL else SOCK_DGRAM syslog_handler = logging.handlers.SysLogHandler( - address=(syslog_address, syslog_port), socktype=socktype, + address=(syslog_address, syslog_port), socktype=socktype ) syslog_handler.setFormatter(formatter) logger.addHandler(syslog_handler) diff --git a/rasa/utils/io.py b/rasa/utils/io.py index 6be2771317cf..3388ef98b049 100644 --- a/rasa/utils/io.py +++ b/rasa/utils/io.py @@ -8,16 +8,7 @@ import re from asyncio import AbstractEventLoop from pathlib import Path -from typing import ( - Text, - Any, - Union, - List, - Type, - Callable, - TYPE_CHECKING, - Pattern, -) +from typing import Text, Any, Union, List, Type, Callable, TYPE_CHECKING, Pattern from typing_extensions import Protocol import rasa.shared.constants diff --git a/rasa/utils/plotting.py b/rasa/utils/plotting.py index b7ba782f3e79..00e05fbbced3 100644 --- a/rasa/utils/plotting.py +++ b/rasa/utils/plotting.py @@ -221,7 +221,7 @@ def plot_paired_histogram( title: Text, output_file: Optional[Text] = None, num_bins: int = 25, - colors: Tuple[Text, Text] = ("#009292", "#920000",), # (dark cyan, dark red) + colors: Tuple[Text, Text] = ("#009292", "#920000"), # (dark cyan, dark red) axes_label: Tuple[Text, Text] = ("Correct", "Wrong"), frame_label: Tuple[Text, Text] = ("Number of Samples", "Confidence"), density: bool = False, diff --git a/rasa/utils/tensorflow/layers.py b/rasa/utils/tensorflow/layers.py index 87b168592c28..b434c6bbde6c 100644 --- a/rasa/utils/tensorflow/layers.py +++ b/rasa/utils/tensorflow/layers.py @@ -15,16 +15,8 @@ LABEL_PAD_ID, ) from rasa.core.constants import DIALOGUE -from rasa.shared.nlu.constants import ( - FEATURE_TYPE_SENTENCE, - FEATURE_TYPE_SEQUENCE, -) -from rasa.shared.nlu.constants import ( - TEXT, - INTENT, - ACTION_NAME, - ACTION_TEXT, -) +from rasa.shared.nlu.constants import FEATURE_TYPE_SENTENCE, FEATURE_TYPE_SEQUENCE +from rasa.shared.nlu.constants import TEXT, INTENT, ACTION_NAME, ACTION_TEXT from rasa.utils.tensorflow.exceptions import TFLayerConfigException import rasa.utils.tensorflow.layers_utils as layers_utils @@ -554,10 +546,7 @@ def x_masked() -> tf.Tensor: return tf.where(tf.tile(lm_mask_bool, (1, 1, x.shape[-1])), x_other, x) - return ( - smart_cond(training, x_masked, lambda: tf.identity(x)), - lm_mask_bool, - ) + return (smart_cond(training, x_masked, lambda: tf.identity(x)), lm_mask_bool) def _scale_loss(log_likelihood: tf.Tensor) -> tf.Tensor: @@ -1439,7 +1428,7 @@ def _sample_candidates( ) pos_labels_embed = tf.expand_dims( - batch_labels_embed, axis=1, name="expand_pos_labels", + batch_labels_embed, axis=1, name="expand_pos_labels" ) # Pick random examples from the batch @@ -1457,7 +1446,7 @@ def _sample_candidates( # Get binary indicators of whether a candidate is positive or not pos_neg_indicators = self._get_pos_neg_indicators( - all_labels_ids, batch_labels_ids, candidate_ids, + all_labels_ids, batch_labels_ids, candidate_ids ) return ( diff --git a/rasa/utils/tensorflow/layers_utils.py b/rasa/utils/tensorflow/layers_utils.py index 7d97eb997c69..28d705c34831 100644 --- a/rasa/utils/tensorflow/layers_utils.py +++ b/rasa/utils/tensorflow/layers_utils.py @@ -64,7 +64,7 @@ def get_candidate_values( ``` """ tiled_x = tf.tile( - tf.expand_dims(batch_flatten(x), 0), 
(tf.shape(candidate_ids)[0], 1, 1), + tf.expand_dims(batch_flatten(x), 0), (tf.shape(candidate_ids)[0], 1, 1) ) candidate_values = tf.gather(tiled_x, candidate_ids, batch_dims=1) diff --git a/rasa/utils/tensorflow/model_data.py b/rasa/utils/tensorflow/model_data.py index 29b48a6894e2..0e0a53d15513 100644 --- a/rasa/utils/tensorflow/model_data.py +++ b/rasa/utils/tensorflow/model_data.py @@ -4,17 +4,7 @@ import scipy.sparse from sklearn.model_selection import train_test_split -from typing import ( - Optional, - Dict, - Text, - List, - Tuple, - Any, - Union, - NamedTuple, - ItemsView, -) +from typing import Optional, Dict, Text, List, Tuple, Any, Union, NamedTuple, ItemsView from collections import defaultdict, OrderedDict logger = logging.getLogger(__name__) diff --git a/rasa/utils/tensorflow/models.py b/rasa/utils/tensorflow/models.py index f1575b593581..1ad55cba6f7c 100644 --- a/rasa/utils/tensorflow/models.py +++ b/rasa/utils/tensorflow/models.py @@ -3,15 +3,7 @@ import logging import random from collections import defaultdict -from typing import ( - List, - Text, - Dict, - Tuple, - Union, - Optional, - Any, -) +from typing import List, Text, Dict, Tuple, Union, Optional, Any from rasa.shared.constants import DIAGNOSTIC_DATA from rasa.utils.tensorflow.constants import ( @@ -303,8 +295,8 @@ def run_inference( Model outputs corresponding to the inputs fed. """ outputs = {} - (data_generator, _,) = rasa.utils.train_utils.create_data_generators( - model_data=model_data, batch_sizes=batch_size, epochs=1, shuffle=False, + (data_generator, _) = rasa.utils.train_utils.create_data_generators( + model_data=model_data, batch_sizes=batch_size, epochs=1, shuffle=False ) data_iterator = iter(data_generator) while True: @@ -554,9 +546,7 @@ def __init__( data_signature: Dict[Text, Dict[Text, List[FeatureSignature]]], label_data: RasaModelData, ) -> None: - super().__init__( - name=name, random_seed=config[RANDOM_SEED], - ) + super().__init__(name=name, random_seed=config[RANDOM_SEED]) self.config = config self.data_signature = data_signature @@ -744,9 +734,7 @@ def _prepare_label_classification_layers(self, predictor_attribute: Text) -> Non def _prepare_embed_layers(self, name: Text, prefix: Text = "embed") -> None: self._tf_layers[f"{prefix}.{name}"] = layers.Embed( - self.config[EMBEDDING_DIMENSION], - self.config[REGULARIZATION_CONSTANT], - name, + self.config[EMBEDDING_DIMENSION], self.config[REGULARIZATION_CONSTANT], name ) def _prepare_ffnn_layer( @@ -765,7 +753,7 @@ def _prepare_ffnn_layer( ) def _prepare_dot_product_loss( - self, name: Text, scale_loss: bool, prefix: Text = "loss", + self, name: Text, scale_loss: bool, prefix: Text = "loss" ) -> None: self._tf_layers[f"{prefix}.{name}"] = self.dot_product_loss_layer( self.config[NUM_NEG], @@ -840,7 +828,7 @@ def _get_sequence_feature_lengths( return tf.zeros([batch_dim], dtype=tf.int32) def _get_sentence_feature_lengths( - self, tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]], key: Text, + self, tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]], key: Text ) -> tf.Tensor: """Fetches the sequence lengths of sentence-level features per input example. 
diff --git a/rasa/utils/tensorflow/rasa_layers.py b/rasa/utils/tensorflow/rasa_layers.py index 149aeff474d7..8b68eceaa167 100644 --- a/rasa/utils/tensorflow/rasa_layers.py +++ b/rasa/utils/tensorflow/rasa_layers.py @@ -253,7 +253,7 @@ def _check_sparse_input_units( ) def _prepare_layers_for_sparse_tensors( - self, attribute: Text, feature_type: Text, config: Dict[Text, Any], + self, attribute: Text, feature_type: Text, config: Dict[Text, Any] ) -> None: """Sets up sparse tensor pre-processing before combining with dense ones.""" # For optionally applying dropout to sparse tensors diff --git a/rasa/utils/tensorflow/temp_keras_modules.py b/rasa/utils/tensorflow/temp_keras_modules.py index e8d222e13e81..11b21aa9ca0d 100644 --- a/rasa/utils/tensorflow/temp_keras_modules.py +++ b/rasa/utils/tensorflow/temp_keras_modules.py @@ -1,14 +1,5 @@ import copy -from typing import ( - List, - Dict, - Union, - Optional, - Any, - Generator, - Tuple, - Iterator, -) +from typing import List, Dict, Union, Optional, Any, Generator, Tuple, Iterator import numpy as np import tensorflow as tf diff --git a/rasa/utils/train_utils.py b/rasa/utils/train_utils.py index 7e7c6d233876..56527bcca815 100644 --- a/rasa/utils/train_utils.py +++ b/rasa/utils/train_utils.py @@ -323,7 +323,7 @@ def create_data_generators( validation_data_generator = None if eval_num_examples > 0: model_data, evaluation_model_data = model_data.split( - eval_num_examples, random_seed, + eval_num_examples, random_seed ) validation_data_generator = RasaBatchDataGenerator( evaluation_model_data, diff --git a/rasa/validator.py b/rasa/validator.py index 06d035070446..3f24ead503aa 100644 --- a/rasa/validator.py +++ b/rasa/validator.py @@ -295,7 +295,7 @@ def verify_story_structure( # Create a list of `StoryConflict` objects conflicts = rasa.core.training.story_conflict.find_story_conflicts( - trackers, self.domain, max_history, + trackers, self.domain, max_history ) if not conflicts: diff --git a/tests/cli/test_rasa_data.py b/tests/cli/test_rasa_data.py index fe1fd7a3ac2a..00041c82ad09 100644 --- a/tests/cli/test_rasa_data.py +++ b/tests/cli/test_rasa_data.py @@ -290,11 +290,6 @@ def test_validate_files_invalid_slot_mappings(tmp_path: Path): - location """ ) - args = { - "domain": str(domain), - "data": None, - "max_history": None, - "config": None, - } + args = {"domain": str(domain), "data": None, "max_history": None, "config": None} with pytest.raises(SystemExit): data.validate_files(namedtuple("Args", args.keys())(*args.values())) diff --git a/tests/cli/test_rasa_evaluate_markers.py b/tests/cli/test_rasa_evaluate_markers.py index 7abf506725d5..ce4762adf028 100644 --- a/tests/cli/test_rasa_evaluate_markers.py +++ b/tests/cli/test_rasa_evaluate_markers.py @@ -114,7 +114,7 @@ def test_markers_cli_results_save_correctly( endpoints_path = write_endpoint_config_to_yaml( tmp_path, - {"tracker_store": {"type": "sql", "db": db_path.replace("\\", "\\\\")},}, + {"tracker_store": {"type": "sql", "db": db_path.replace("\\", "\\\\")}}, ) markers_path = write_markers_config_to_yaml( diff --git a/tests/cli/test_rasa_init.py b/tests/cli/test_rasa_init.py index 9647937a6598..f442dc1885ee 100644 --- a/tests/cli/test_rasa_init.py +++ b/tests/cli/test_rasa_init.py @@ -77,7 +77,7 @@ def test_train_data_in_project_dir(monkeypatch: MonkeyPatch, tmp_path: Path): subparsers = parser.add_subparsers() scaffold.add_subparser(subparsers, parents=[]) - args = parser.parse_args(["init", "--no-prompt",]) + args = parser.parse_args(["init", "--no-prompt"]) # Simple config which 
should train fast. def mock_get_config(*args): diff --git a/tests/cli/test_rasa_test.py b/tests/cli/test_rasa_test.py index ddafedfa4728..582922fcfe9f 100644 --- a/tests/cli/test_rasa_test.py +++ b/tests/cli/test_rasa_test.py @@ -100,7 +100,7 @@ def test_test_with_no_user_utterance( run_in_simple_project_with_model: Callable[..., RunResult] ): write_yaml( - {"pipeline": "KeywordIntentClassifier", "policies": [{"name": "TEDPolicy"}],}, + {"pipeline": "KeywordIntentClassifier", "policies": [{"name": "TEDPolicy"}]}, "config.yml", ) @@ -160,9 +160,7 @@ def test_test_nlu_cross_validation_with_autoconfig( config_path = str(testdir.tmpdir / "config.yml") nlu_path = str(testdir.tmpdir / "nlu.yml") shutil.copy(str(moodbot_nlu_data_path), nlu_path) - write_yaml( - {"language": "en", "pipeline": None, "policies": None,}, config_path, - ) + write_yaml({"language": "en", "pipeline": None, "policies": None}, config_path) args = [ shutil.which("rasa"), "test", diff --git a/tests/cli/test_rasa_train.py b/tests/cli/test_rasa_train.py index e637f65ae4d9..64d0ebcb5bd6 100644 --- a/tests/cli/test_rasa_train.py +++ b/tests/cli/test_rasa_train.py @@ -14,10 +14,7 @@ from rasa.engine.storage.local_model_storage import LocalModelStorage from rasa.engine.storage.resource import Resource from rasa.shared.core.domain import Domain -from rasa.model_training import ( - CODE_NEEDS_TO_BE_RETRAINED, - CODE_FORCED_TRAINING, -) +from rasa.model_training import CODE_NEEDS_TO_BE_RETRAINED, CODE_FORCED_TRAINING # noinspection PyProtectedMember from rasa.cli.train import _get_valid_config @@ -69,9 +66,7 @@ def test_train(run_in_simple_project: Callable[..., RunResult], tmp_path: Path): def test_train_finetune( run_in_simple_project: Callable[..., RunResult], capsys: CaptureFixture ): - run_in_simple_project( - "train", "--finetune", - ) + run_in_simple_project("train", "--finetune") output = capsys.readouterr().out assert "No model for finetuning found" in output diff --git a/tests/cli/test_rasa_x.py b/tests/cli/test_rasa_x.py index f3694e62658b..07533baba7b0 100644 --- a/tests/cli/test_rasa_x.py +++ b/tests/cli/test_rasa_x.py @@ -161,7 +161,7 @@ async def test_pull_runtime_config_from_server(): def test_rasa_x_raises_warning_above_version_3( - monkeypatch: MonkeyPatch, run: Callable[..., RunResult], + monkeypatch: MonkeyPatch, run: Callable[..., RunResult] ): def mock_run_locally(args): return None diff --git a/tests/conftest.py b/tests/conftest.py index 8de3d38243cd..a19204efd77b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -203,7 +203,7 @@ async def trained_default_agent_model( config_path = project_path / "config.yml" rasa.shared.utils.io.write_text_file(config, config_path) model_path = await trained_async( - domain_path, str(config_path), [stories_path, nlu_data_path], + domain_path, str(config_path), [stories_path, nlu_data_path] ) return model_path @@ -379,7 +379,7 @@ async def trained_core_model( stories_path: Text, ) -> Text: trained_core_model_path = await trained_async( - domain=domain_path, config=stack_config_path, training_files=[stories_path], + domain=domain_path, config=stack_config_path, training_files=[stories_path] ) return trained_core_model_path @@ -393,7 +393,7 @@ async def trained_nlu_model( stack_config_path: Text, ) -> Text: trained_nlu_model_path = await trained_async( - domain=domain_path, config=stack_config_path, training_files=[nlu_data_path], + domain=domain_path, config=stack_config_path, training_files=[nlu_data_path] ) return trained_nlu_model_path @@ -589,7 +589,7 @@ async def 
e2e_bot( @pytest.fixture(scope="module") -async def response_selector_agent(trained_response_selector_bot: Path,) -> Agent: +async def response_selector_agent(trained_response_selector_bot: Path) -> Agent: return await load_agent(str(trained_response_selector_bot)) diff --git a/tests/core/actions/test_forms.py b/tests/core/actions/test_forms.py index 961e713d6460..335e7fd2fede 100644 --- a/tests/core/actions/test_forms.py +++ b/tests/core/actions/test_forms.py @@ -195,9 +195,7 @@ async def test_switch_forms_with_same_slot(default_agent: Agent): UserUttered("return my shoes", {"name": "form_2", "confidence": 1.0}), DefinePrevUserUtteredFeaturization(False), ] - tracker.update_with_events( - next_events, domain, - ) + tracker.update_with_events(next_events, domain) events_expected.extend(next_events) # form_1 is still active, and bot will first validate if the user utterance @@ -394,13 +392,13 @@ async def test_action_rejection(): # Validate function says slot is invalid ( [{"event": "slot", "name": "num_people", "value": None}], - [SlotSet("num_people", None), SlotSet(REQUESTED_SLOT, "num_people"),], + [SlotSet("num_people", None), SlotSet(REQUESTED_SLOT, "num_people")], ), # Validate function decides to request a slot which is not part of the default # slot mapping ( [{"event": "slot", "name": "requested_slot", "value": "is_outside"}], - [SlotSet(REQUESTED_SLOT, "is_outside"),], + [SlotSet(REQUESTED_SLOT, "is_outside")], ), # Validate function decides that no more slots should be requested ( @@ -431,7 +429,7 @@ async def test_action_rejection(): # User rejected manually ( [{"event": "action_execution_rejected", "name": "my form"}], - [ActionExecutionRejected("my form"),], + [ActionExecutionRejected("my form")], ), ], ) @@ -542,7 +540,7 @@ async def test_request_correct_slots_after_unhappy_path_with_custom_required_slo ActiveLoop(form_name), SlotSet(REQUESTED_SLOT, "slot_2"), ActionExecuted(ACTION_LISTEN_NAME), - UserUttered("hello", intent={"name": "greet", "confidence": 1.0},), + UserUttered("hello", intent={"name": "greet", "confidence": 1.0}), ActionExecutionRejected(form_name), ActionExecuted("utter_greet"), ], @@ -552,7 +550,7 @@ async def test_request_correct_slots_after_unhappy_path_with_custom_required_slo # Custom form validation action changes the order of the requested slots validate_return_events = [ - {"event": "slot", "name": REQUESTED_SLOT, "value": slot_name_2}, + {"event": "slot", "name": REQUESTED_SLOT, "value": slot_name_2} ] # The form should ask the same slot again when coming back after unhappy path @@ -692,7 +690,7 @@ async def test_validate_slots_on_activation_with_other_action_after_user_utteran @pytest.mark.parametrize( - "utterance_name", ["utter_ask_my_form_num_people", "utter_ask_num_people"], + "utterance_name", ["utter_ask_my_form_num_people", "utter_ask_num_people"] ) def test_name_of_utterance(utterance_name: Text): form_name = "my_form" @@ -963,11 +961,8 @@ async def test_extract_other_slots_with_entity( domain = Domain.from_dict( { "slots": { - "some_other_slot": { - "type": "any", - "mappings": some_other_slot_mapping, - }, - "some_slot": {"type": "any", "mappings": some_slot_mapping,}, + "some_other_slot": {"type": "any", "mappings": some_other_slot_mapping}, + "some_slot": {"type": "any", "mappings": some_slot_mapping}, }, "forms": { form_name: {REQUIRED_SLOTS_KEY: ["some_other_slot", "some_slot"]} @@ -1126,7 +1121,7 @@ async def test_ask_for_slot_if_not_utter_ask( ], ) async def test_ignored_intents_with_slot_type_from_entity( - ignored_intents: 
Union[Text, List[Text]], slot_not_intent: Union[Text, List[Text]], + ignored_intents: Union[Text, List[Text]], slot_not_intent: Union[Text, List[Text]] ): form_name = "some_form" entity_name = "some_slot" @@ -1219,7 +1214,7 @@ async def test_ignored_intents_with_slot_type_from_entity( ], ) async def test_ignored_intents_with_slot_type_from_text( - ignored_intents: Union[Text, List[Text]], slot_not_intent: Union[Text, List[Text]], + ignored_intents: Union[Text, List[Text]], slot_not_intent: Union[Text, List[Text]] ): form_name = "some_form" entity_name = "some_slot" @@ -1453,7 +1448,7 @@ async def test_extract_other_slots_with_matched_mapping_conditions(): UserUttered( "My name is Emily.", intent={"name": "inform", "confidence": 1.0}, - entities=[{"entity": "name", "value": "Emily"},], + entities=[{"entity": "name", "value": "Emily"}], ), ActionExecuted(ACTION_LISTEN_NAME), ], @@ -1525,7 +1520,7 @@ async def test_extract_other_slots_raises_no_matched_conditions(): UserUttered( "My name is Emily.", intent={"name": "inform", "confidence": 1.0}, - entities=[{"entity": "name", "value": "Emily"},], + entities=[{"entity": "name", "value": "Emily"}], ), ActionExecuted(ACTION_LISTEN_NAME), ], @@ -1587,7 +1582,7 @@ async def test_action_extract_slots_custom_mapping_with_condition(): mocked.post( action_server_url, payload={ - "events": [{"event": "slot", "name": "custom_slot", "value": "test"},] + "events": [{"event": "slot", "name": "custom_slot", "value": "test"}] }, ) @@ -1651,7 +1646,7 @@ async def test_form_slots_empty_with_restart(): UserUttered( "My name is Emily.", intent={"name": "inform", "confidence": 1.0}, - entities=[{"entity": "name", "value": "Emily"},], + entities=[{"entity": "name", "value": "Emily"}], ), SlotSet("name", "emily"), Restarted(), diff --git a/tests/core/conftest.py b/tests/core/conftest.py index 705acab78152..b381b7cc0a86 100644 --- a/tests/core/conftest.py +++ b/tests/core/conftest.py @@ -35,7 +35,7 @@ def as_feature(self): class MockedMongoTrackerStore(MongoTrackerStore): """In-memory mocked version of `MongoTrackerStore`.""" - def __init__(self, _domain: Domain,) -> None: + def __init__(self, _domain: Domain) -> None: from mongomock import MongoClient self.db = MongoClient().rasa diff --git a/tests/core/evaluation/test_marker.py b/tests/core/evaluation/test_marker.py index 9cd9f7f347e4..ea4536fcb2ff 100644 --- a/tests/core/evaluation/test_marker.py +++ b/tests/core/evaluation/test_marker.py @@ -264,9 +264,7 @@ def test_operator_occur(negated: bool): ] events, expected = zip(*events_expected) sub_marker = OrMarker( - [IntentDetectedMarker("1"), SlotSetMarker("2")], - name="or marker", - negated=False, + [IntentDetectedMarker("1"), SlotSetMarker("2")], name="or marker", negated=False ) marker = OccurrenceMarker([sub_marker], name="marker_name", negated=negated) for event in events: @@ -308,9 +306,7 @@ def test_operator_occur_never_applied_negated(): ] events, expected = zip(*events_expected) sub_marker = OrMarker( - [IntentDetectedMarker("1"), SlotSetMarker("2")], - name="or marker", - negated=False, + [IntentDetectedMarker("1"), SlotSetMarker("2")], name="or marker", negated=False ) marker = OccurrenceMarker([sub_marker], name="or never occurred", negated=True) for event in events: @@ -332,7 +328,7 @@ def test_operators_nested_simple(): marker = AndMarker( markers=[ SlotSetMarker("s1"), - OrMarker([IntentDetectedMarker("4"), IntentDetectedMarker("6"),]), + OrMarker([IntentDetectedMarker("4"), IntentDetectedMarker("6")]), ], name="marker_name", ) @@ -631,14 +627,12 @@ 
def test_marker_validation_raises(config: Any): Marker.from_config(config) -def test_marker_from_path_only_reads_yamls(tmp_path: Path,): +def test_marker_from_path_only_reads_yamls(tmp_path: Path): suffixes = [("yaml", True), ("yml", True), ("yaeml", False), ("config", False)] for idx, (suffix, allowed) in enumerate(suffixes): config = {f"marker-{idx}": {IntentDetectedMarker.positive_tag(): "intent"}} config_file = tmp_path / f"config-{idx}.{suffix}" - rasa.shared.utils.io.write_yaml( - data=config, target=config_file, - ) + rasa.shared.utils.io.write_yaml(data=config, target=config_file) loaded = Marker.from_path(tmp_path) assert len(loaded.sub_markers) == sum(allowed for _, allowed in suffixes) assert set(sub_marker.name for sub_marker in loaded.sub_markers) == set( @@ -659,9 +653,7 @@ def test_marker_from_path_only_reads_yamls(tmp_path: Path,): def test_marker_from_path_adds_special_or_marker(tmp_path: Path, configs: Any): yaml_file = tmp_path / "config.yml" - rasa.shared.utils.io.write_yaml( - data=configs, target=yaml_file, - ) + rasa.shared.utils.io.write_yaml(data=configs, target=yaml_file) loaded = Marker.from_path(tmp_path) assert isinstance(loaded, OrMarker) assert loaded.name == Marker.ANY_MARKER @@ -719,13 +711,13 @@ def test_marker_from_path_raises( AndMarker( markers=[ SlotSetMarker("s1"), - OrMarker([IntentDetectedMarker("4"), IntentDetectedMarker("6"),]), - ], + OrMarker([IntentDetectedMarker("4"), IntentDetectedMarker("6")]), + ] ), 3, ), (SlotSetMarker("s1"), 1), - (AndMarker(markers=[SlotSetMarker("s1"), IntentDetectedMarker("6"),],), 2), + (AndMarker(markers=[SlotSetMarker("s1"), IntentDetectedMarker("6")]), 2), ], ) def test_marker_depth(marker: Marker, expected_depth: int): diff --git a/tests/core/evaluation/test_marker_stats.py b/tests/core/evaluation/test_marker_stats.py index a734a3430715..c811ffc2ad27 100644 --- a/tests/core/evaluation/test_marker_stats.py +++ b/tests/core/evaluation/test_marker_stats.py @@ -139,10 +139,10 @@ def test_process_results_per_session(seed: int): stats.session_results[marker][stat_name][idx], stat_value ) for idx in range(num_sessions): - assert stats.session_identifier[idx] == (sender_ids[idx], session_indices[idx],) + assert stats.session_identifier[idx] == (sender_ids[idx], session_indices[idx]) -@pytest.mark.parametrize("seed", [2345, 5654, 2345234,]) +@pytest.mark.parametrize("seed", [2345, 5654, 2345234]) def test_process_results_overall(seed: int): rng = np.random.default_rng(seed=seed) ( @@ -176,7 +176,7 @@ def test_process_results_overall(seed: int): assert stats.num_preceding_user_turns_collected[marker] == concatenated_numbers -@pytest.mark.parametrize("seed", [2345, 5654, 2345234,]) +@pytest.mark.parametrize("seed", [2345, 5654, 2345234]) def test_overall_statistics_to_csv(tmp_path: Path, seed: int): rng = np.random.default_rng(seed=seed) ( @@ -253,7 +253,7 @@ def test_overall_statistics_to_csv(tmp_path: Path, seed: int): row_idx += 1 -@pytest.mark.parametrize("seed", [2345, 5654, 2345234,]) +@pytest.mark.parametrize("seed", [2345, 5654, 2345234]) def test_per_session_statistics_to_csv(tmp_path: Path, seed: int): rng = np.random.default_rng(seed=seed) diff --git a/tests/core/featurizers/test_precomputation.py b/tests/core/featurizers/test_precomputation.py index 722c5b25efe8..4fef7d71a515 100644 --- a/tests/core/featurizers/test_precomputation.py +++ b/tests/core/featurizers/test_precomputation.py @@ -363,7 +363,7 @@ def test_container_derive_messages_from_domain_and_add(): @pytest.fixture def input_converter( - 
default_model_storage: ModelStorage, default_execution_context: ExecutionContext, + default_model_storage: ModelStorage, default_execution_context: ExecutionContext ): return CoreFeaturizationInputConverter.create( CoreFeaturizationInputConverter.get_default_config(), @@ -471,7 +471,7 @@ def test_converter_for_inference(input_converter: CoreFeaturizationInputConverte @pytest.fixture def collector( - default_model_storage: ModelStorage, default_execution_context: ExecutionContext, + default_model_storage: ModelStorage, default_execution_context: ExecutionContext ): return CoreFeaturizationCollector.create( CoreFeaturizationCollector.get_default_config(), diff --git a/tests/core/featurizers/test_single_state_featurizers.py b/tests/core/featurizers/test_single_state_featurizers.py index 6587cc7a8947..cf84cf55e194 100644 --- a/tests/core/featurizers/test_single_state_featurizers.py +++ b/tests/core/featurizers/test_single_state_featurizers.py @@ -222,13 +222,12 @@ def test_encode_state__without_lookup(action_name: Text): def dummy_features( fill_value: int, units: int, attribute: Text, type: Text, is_sparse: bool ) -> Features: - """Create some dummy `Features` with the desired properties. - """ + """Create some dummy `Features` with the desired properties.""" matrix = np.full(shape=(1, units), fill_value=fill_value) if is_sparse: matrix = scipy.sparse.coo_matrix(matrix) return Features( - features=matrix, attribute=attribute, feature_type=type, origin="whatever", + features=matrix, attribute=attribute, feature_type=type, origin="whatever" ) @@ -311,8 +310,8 @@ def test_encode_state__with_lookup__looksup_or_creates_features(action_name: Tex action_text = "throw a ball" intent = "inform" state = { - USER: {TEXT: text, INTENT: intent, ENTITIES: entity_name_list,}, - PREVIOUS_ACTION: {ACTION_NAME: action_name, ACTION_TEXT: action_text,}, + USER: {TEXT: text, INTENT: intent, ENTITIES: entity_name_list}, + PREVIOUS_ACTION: {ACTION_NAME: action_name, ACTION_TEXT: action_text}, ACTIVE_LOOP: {"name": "active_loop_4"}, SLOTS: {"slot_1": (1.0,)}, } @@ -374,7 +373,7 @@ def test_encode_state__with_lookup__looksup_or_creates_features(action_name: Tex precomputations.add(Message(data={ACTION_NAME: action_name})) # encode the state - encoded = f.encode_state(state, precomputations=precomputations,) + encoded = f.encode_state(state, precomputations=precomputations) # check all the features are encoded and *_text features are encoded by a # dense featurizer @@ -459,7 +458,7 @@ def test_encode_entities__with_entity_roles_and_groups(): # encode! encoded = f.encode_entities( - entity_data={TEXT: text, ENTITIES: entities}, precomputations=precomputations, + entity_data={TEXT: text, ENTITIES: entities}, precomputations=precomputations ) # check @@ -519,7 +518,7 @@ def test_encode_entities__with_bilou_entity_roles_and_groups(): # encode! encoded = f.encode_entities( - {TEXT: text, ENTITIES: entities,}, + {TEXT: text, ENTITIES: entities}, precomputations=precomputations, bilou_tagging=True, ) @@ -542,7 +541,7 @@ def test_encode_entities__with_bilou_entity_roles_and_groups(): ENTITY_ATTRIBUTE_VALUE: "Saint Petersburg", ENTITY_ATTRIBUTE_START: 15, ENTITY_ATTRIBUTE_END: 31, - }, + } ] message = Message({TEXT: text, TOKENS_NAMES[TEXT]: tokens, ENTITIES: entities}) @@ -552,7 +551,7 @@ def test_encode_entities__with_bilou_entity_roles_and_groups(): # encode! 
encoded = f.encode_entities( - {TEXT: text, ENTITIES: entities,}, + {TEXT: text, ENTITIES: entities}, precomputations=precomputations, bilou_tagging=True, ) diff --git a/tests/core/featurizers/test_tracker_featurizer.py b/tests/core/featurizers/test_tracker_featurizer.py index fe29d1fec3c8..dda663cbf8a4 100644 --- a/tests/core/featurizers/test_tracker_featurizer.py +++ b/tests/core/featurizers/test_tracker_featurizer.py @@ -63,16 +63,16 @@ def test_convert_action_labels_to_ids(domain: Domain): [ domain.action_names_or_texts.index("utter_greet"), domain.action_names_or_texts.index("utter_channel"), - ], + ] ), np.array( [ domain.action_names_or_texts.index("utter_greet"), domain.action_names_or_texts.index("utter_default"), domain.action_names_or_texts.index("utter_goodbye"), - ], + ] ), - ], + ] ) assert expected_output.size == actual_output.size @@ -106,7 +106,7 @@ def test_convert_intent_labels_to_ids(domain: Domain): domain.intents.index("affirm"), LABEL_PAD_ID, ], - ], + ] ) assert expected_labels.size == actual_labels.size assert expected_labels.shape == actual_labels.shape @@ -156,7 +156,7 @@ def test_featurize_trackers_with_full_dialogue_tracker_featurizer( tracker_featurizer = FullDialogueTrackerFeaturizer(state_featurizer) actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers( - [moodbot_tracker], moodbot_domain, precomputations=None, + [moodbot_tracker], moodbot_domain, precomputations=None ) expected_features = [ @@ -197,7 +197,7 @@ def test_featurize_trackers_with_full_dialogue_tracker_featurizer( def test_trackers_ignore_action_unlikely_intent_with_full_dialogue_tracker_featurizer( - moodbot_domain: Domain, moodbot_features: Dict[Text, Dict[Text, Features]], + moodbot_domain: Domain, moodbot_features: Dict[Text, Dict[Text, Features]] ): tracker = DialogueStateTracker.from_events( @@ -267,7 +267,7 @@ def test_trackers_ignore_action_unlikely_intent_with_full_dialogue_tracker_featu def test_trackers_keep_action_unlikely_intent_with_full_dialogue_tracker_featurizer( - moodbot_domain: Domain, moodbot_features: Dict[Text, Dict[Text, Features]], + moodbot_domain: Domain, moodbot_features: Dict[Text, Dict[Text, Features]] ): tracker = DialogueStateTracker.from_events( @@ -293,7 +293,7 @@ def test_trackers_keep_action_unlikely_intent_with_full_dialogue_tracker_featuri tracker_featurizer = FullDialogueTrackerFeaturizer(state_featurizer) actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers( - [tracker], moodbot_domain, precomputations=None, + [tracker], moodbot_domain, precomputations=None ) expected_features = [ @@ -345,7 +345,7 @@ def test_create_state_features_full_dialogue_tracker_featurizer( tracker_featurizer = FullDialogueTrackerFeaturizer(state_featurizer) state_featurizer.prepare_for_training(moodbot_domain) actual_features = tracker_featurizer.create_state_features( - [moodbot_tracker], moodbot_domain, precomputations=None, + [moodbot_tracker], moodbot_domain, precomputations=None ) expected_features = [ @@ -378,7 +378,7 @@ def test_create_state_features_full_dialogue_tracker_featurizer( def test_state_features_ignore_action_unlikely_intent_full_dialogue_tracker_featurizer( - moodbot_domain: Domain, moodbot_features: Dict[Text, Dict[Text, Features]], + moodbot_domain: Domain, moodbot_features: Dict[Text, Dict[Text, Features]] ): tracker = DialogueStateTracker.from_events( "default", @@ -435,7 +435,7 @@ def test_state_features_ignore_action_unlikely_intent_full_dialogue_tracker_feat def 
test_state_features_keep_action_unlikely_intent_full_dialogue_tracker_featurizer( - moodbot_domain: Domain, moodbot_features: Dict[Text, Dict[Text, Features]], + moodbot_domain: Domain, moodbot_features: Dict[Text, Dict[Text, Features]] ): tracker = DialogueStateTracker.from_events( "default", @@ -458,7 +458,7 @@ def test_state_features_keep_action_unlikely_intent_full_dialogue_tracker_featur tracker_featurizer = FullDialogueTrackerFeaturizer(state_featurizer) state_featurizer.prepare_for_training(moodbot_domain) actual_features = tracker_featurizer.create_state_features( - [tracker], moodbot_domain, precomputations=None, + [tracker], moodbot_domain, precomputations=None ) expected_features = [ @@ -496,7 +496,7 @@ def test_prediction_states_with_full_dialogue_tracker_featurizer( state_featurizer = SingleStateFeaturizer() tracker_featurizer = FullDialogueTrackerFeaturizer(state_featurizer) actual_states = tracker_featurizer.prediction_states( - [moodbot_tracker], moodbot_domain, + [moodbot_tracker], moodbot_domain ) expected_states = [ @@ -506,7 +506,7 @@ def test_prediction_states_with_full_dialogue_tracker_featurizer( PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "greet"}, }, - {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},}, + {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"}}, { PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "mood_unhappy"}, @@ -523,7 +523,7 @@ def test_prediction_states_with_full_dialogue_tracker_featurizer( PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "deny"}, }, - {USER: {INTENT: "deny"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_goodbye"},}, + {USER: {INTENT: "deny"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_goodbye"}}, ] ] @@ -553,7 +553,7 @@ def test_prediction_states_hide_rule_states_with_full_dialogue_tracker_featurize ) actual_states = tracker_featurizer.prediction_states( - [rule_tracker], moodbot_domain, ignore_rule_only_turns=True, + [rule_tracker], moodbot_domain, ignore_rule_only_turns=True ) expected_states = [ @@ -563,7 +563,7 @@ def test_prediction_states_hide_rule_states_with_full_dialogue_tracker_featurize PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "greet"}, }, - ], + ] ] assert actual_states is not None @@ -587,7 +587,7 @@ def test_prediction_states_hide_rule_states_with_full_dialogue_tracker_featurize ) actual_states = tracker_featurizer.prediction_states( - [embedded_rule_tracker], moodbot_domain, ignore_rule_only_turns=True, + [embedded_rule_tracker], moodbot_domain, ignore_rule_only_turns=True ) expected_states = [ @@ -650,7 +650,7 @@ def test_prediction_states_ignore_action_intent_unlikely_full_dialogue_featurize PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "greet"}, }, - {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},}, + {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"}}, { PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "mood_great"}, @@ -697,7 +697,7 @@ def test_prediction_states_keeps_action_intent_unlikely_full_dialogue_featurizer domain=moodbot_domain, ) - actual_states = tracker_featurizer.prediction_states([tracker], moodbot_domain,) + actual_states = tracker_featurizer.prediction_states([tracker], moodbot_domain) expected_states = [ [ @@ -710,7 +710,7 @@ def test_prediction_states_keeps_action_intent_unlikely_full_dialogue_featurizer USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: ACTION_UNLIKELY_INTENT_NAME}, }, - 
{USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},}, + {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"}}, { PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "mood_great"}, @@ -750,11 +750,11 @@ def test_featurize_trackers_with_max_history_tracker_featurizer( ) actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers( - [moodbot_tracker], moodbot_domain, precomputations=None, + [moodbot_tracker], moodbot_domain, precomputations=None ) expected_features = [ - [{},], + [{}], [ {}, { @@ -867,7 +867,7 @@ def test_featurize_trackers_ignore_action_unlikely_intent_max_history_featurizer ) state_featurizer = SingleStateFeaturizer() tracker_featurizer = MaxHistoryTrackerFeaturizer( - state_featurizer, max_history=max_history, + state_featurizer, max_history=max_history ) actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers( @@ -878,7 +878,7 @@ def test_featurize_trackers_ignore_action_unlikely_intent_max_history_featurizer ) expected_features = [ - [{},], + [{}], [ {}, { @@ -933,15 +933,15 @@ def test_featurize_trackers_keep_action_unlikely_intent_max_history_featurizer( ) state_featurizer = SingleStateFeaturizer() tracker_featurizer = MaxHistoryTrackerFeaturizer( - state_featurizer, max_history=max_history, + state_featurizer, max_history=max_history ) actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers( - [tracker], moodbot_domain, precomputations=None, + [tracker], moodbot_domain, precomputations=None ) expected_features = [ - [{},], + [{}], [ {}, { @@ -988,7 +988,7 @@ def test_featurize_trackers_keep_action_unlikely_intent_max_history_featurizer( @pytest.mark.parametrize( "remove_duplicates,max_history", - [[True, None], [True, 2], [False, None], [False, 2],], + [[True, None], [True, 2], [False, None], [False, 2]], ) def test_deduplicate_featurize_trackers_with_max_history_tracker_featurizer( moodbot_tracker: DialogueStateTracker, @@ -1004,11 +1004,11 @@ def test_deduplicate_featurize_trackers_with_max_history_tracker_featurizer( # Add Duplicate moodbot_tracker states should get removed. 
actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers( - [moodbot_tracker, moodbot_tracker], moodbot_domain, precomputations=None, + [moodbot_tracker, moodbot_tracker], moodbot_domain, precomputations=None ) expected_features = [ - [{},], + [{}], [ {}, { @@ -1118,7 +1118,7 @@ def test_create_state_features_with_max_history_tracker_featurizer( ) state_featurizer.prepare_for_training(moodbot_domain) actual_features = tracker_featurizer.create_state_features( - [moodbot_tracker], moodbot_domain, precomputations=None, + [moodbot_tracker], moodbot_domain, precomputations=None ) expected_features = [ @@ -1245,7 +1245,7 @@ def test_create_state_features_keep_action_unlikely_intent_max_history_featurize ) state_featurizer.prepare_for_training(moodbot_domain) actual_features = tracker_featurizer.create_state_features( - [tracker], moodbot_domain, precomputations=None, + [tracker], moodbot_domain, precomputations=None ) expected_features = [ @@ -1291,7 +1291,7 @@ def test_prediction_states_with_max_history_tracker_featurizer( state_featurizer, max_history=max_history ) actual_states = tracker_featurizer.prediction_states( - [moodbot_tracker], moodbot_domain, + [moodbot_tracker], moodbot_domain ) expected_states = [ @@ -1301,7 +1301,7 @@ def test_prediction_states_with_max_history_tracker_featurizer( PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "greet"}, }, - {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},}, + {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"}}, { PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "mood_unhappy"}, @@ -1318,7 +1318,7 @@ def test_prediction_states_with_max_history_tracker_featurizer( PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "deny"}, }, - {USER: {INTENT: "deny"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_goodbye"},}, + {USER: {INTENT: "deny"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_goodbye"}}, ] ] if max_history is not None: @@ -1355,7 +1355,7 @@ def test_prediction_states_hide_rule_states_with_max_history_tracker_featurizer( ) actual_states = tracker_featurizer.prediction_states( - [rule_tracker], moodbot_domain, ignore_rule_only_turns=True, + [rule_tracker], moodbot_domain, ignore_rule_only_turns=True ) expected_states = [ @@ -1365,7 +1365,7 @@ def test_prediction_states_hide_rule_states_with_max_history_tracker_featurizer( PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "greet"}, }, - ], + ] ] assert actual_states is not None @@ -1389,7 +1389,7 @@ def test_prediction_states_hide_rule_states_with_max_history_tracker_featurizer( ) actual_states = tracker_featurizer.prediction_states( - [embedded_rule_tracker], moodbot_domain, ignore_rule_only_turns=True, + [embedded_rule_tracker], moodbot_domain, ignore_rule_only_turns=True ) expected_states = [ @@ -1460,7 +1460,7 @@ def test_prediction_states_ignores_action_intent_unlikely_max_history_featurizer PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "greet"}, }, - {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},}, + {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"}}, { PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "mood_great"}, @@ -1515,7 +1515,7 @@ def test_prediction_states_keeps_action_intent_unlikely_max_history_featurizer( domain=moodbot_domain, ) - actual_states = tracker_featurizer.prediction_states([tracker], moodbot_domain,) + actual_states = 
tracker_featurizer.prediction_states([tracker], moodbot_domain) expected_states = [ [ @@ -1528,7 +1528,7 @@ def test_prediction_states_keeps_action_intent_unlikely_max_history_featurizer( USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: ACTION_UNLIKELY_INTENT_NAME}, }, - {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},}, + {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"}}, { PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "mood_great"}, @@ -1578,11 +1578,11 @@ def test_featurize_trackers_with_intent_max_history_tracker_featurizer( ) actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers( - [moodbot_tracker], moodbot_domain, precomputations=None, + [moodbot_tracker], moodbot_domain, precomputations=None ) expected_features = [ - [{},], + [{}], [ {}, { @@ -1652,7 +1652,7 @@ def test_trackers_ignore_action_unlikely_intent_intent_max_history_featurizer( ) state_featurizer = IntentTokenizerSingleStateFeaturizer() tracker_featurizer = IntentMaxHistoryTrackerFeaturizer( - state_featurizer, max_history=max_history, + state_featurizer, max_history=max_history ) actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers( @@ -1663,7 +1663,7 @@ def test_trackers_ignore_action_unlikely_intent_intent_max_history_featurizer( ) expected_features = [ - [{},], + [{}], [ {}, { @@ -1719,15 +1719,15 @@ def test_trackers_keep_action_unlikely_intent_intent_max_history_featurizer( ) state_featurizer = IntentTokenizerSingleStateFeaturizer() tracker_featurizer = IntentMaxHistoryTrackerFeaturizer( - state_featurizer, max_history=max_history, + state_featurizer, max_history=max_history ) actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers( - [tracker], moodbot_domain, precomputations=None, + [tracker], moodbot_domain, precomputations=None ) expected_features = [ - [{},], + [{}], [ {}, { @@ -1782,11 +1782,11 @@ def test_deduplicate_featurize_trackers_with_intent_max_history_tracker_featuriz # Add Duplicate moodbot_tracker states should get removed. 
actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers( - [moodbot_tracker, moodbot_tracker], moodbot_domain, precomputations=None, + [moodbot_tracker, moodbot_tracker], moodbot_domain, precomputations=None ) expected_features = [ - [{},], + [{}], [ {}, { @@ -1862,7 +1862,7 @@ def test_create_state_features_with_intent_max_history_tracker_featurizer( ) state_featurizer.prepare_for_training(moodbot_domain) actual_features = tracker_featurizer.create_state_features( - [moodbot_tracker], moodbot_domain, precomputations=None, + [moodbot_tracker], moodbot_domain, precomputations=None ) expected_features = [ @@ -1879,7 +1879,7 @@ def test_create_state_features_with_intent_max_history_tracker_featurizer( }, {ACTION_NAME: [moodbot_features["actions"]["utter_cheer_up"]]}, {ACTION_NAME: [moodbot_features["actions"]["utter_did_that_help"]]}, - ], + ] ] if max_history is not None: expected_features = [x[-max_history:] for x in expected_features] @@ -1946,7 +1946,7 @@ def test_state_features_ignore_action_unlikely_intent_intent_max_history_featuri INTENT: [moodbot_features["intents"]["mood_great"]], }, {ACTION_NAME: [moodbot_features["actions"]["utter_happy"]]}, - ], + ] ] if max_history is not None: expected_features = [x[-max_history:] for x in expected_features] @@ -1994,7 +1994,7 @@ def test_state_features_keep_action_unlikely_intent_intent_max_history_featurize ) state_featurizer.prepare_for_training(moodbot_domain) actual_features = tracker_featurizer.create_state_features( - [tracker], moodbot_domain, precomputations=None, + [tracker], moodbot_domain, precomputations=None ) expected_features = [ @@ -2012,7 +2012,7 @@ def test_state_features_keep_action_unlikely_intent_intent_max_history_featurize }, {ACTION_NAME: [moodbot_features["actions"][ACTION_UNLIKELY_INTENT_NAME]]}, {ACTION_NAME: [moodbot_features["actions"]["utter_happy"]]}, - ], + ] ] if max_history is not None: @@ -2044,7 +2044,7 @@ def test_prediction_states_with_intent_max_history_tracker_featurizer( state_featurizer, max_history=max_history ) actual_states = tracker_featurizer.prediction_states( - [moodbot_tracker], moodbot_domain, + [moodbot_tracker], moodbot_domain ) expected_states = [ @@ -2054,7 +2054,7 @@ def test_prediction_states_with_intent_max_history_tracker_featurizer( PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "greet"}, }, - {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},}, + {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"}}, { PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "mood_unhappy"}, @@ -2103,7 +2103,7 @@ def test_prediction_states_hide_rule_states_intent_max_history_featurizer( ) actual_states = tracker_featurizer.prediction_states( - [rule_tracker], moodbot_domain, ignore_rule_only_turns=True, + [rule_tracker], moodbot_domain, ignore_rule_only_turns=True ) expected_states = [[{}]] @@ -2127,10 +2127,10 @@ def test_prediction_states_hide_rule_states_intent_max_history_featurizer( ) actual_states = tracker_featurizer.prediction_states( - [embedded_rule_tracker], moodbot_domain, ignore_rule_only_turns=True, + [embedded_rule_tracker], moodbot_domain, ignore_rule_only_turns=True ) - expected_states = [[{},]] + expected_states = [[{}]] assert actual_states is not None assert len(actual_states) == len(expected_states) @@ -2179,7 +2179,7 @@ def test_prediction_states_ignores_action_intent_unlikely_intent_max_history_fea PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "greet"}, }, - 
{USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},}, + {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"}}, { PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "mood_great"}, @@ -2243,7 +2243,7 @@ def test_prediction_states_keeps_action_intent_unlikely_intent_max_history_featu USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: ACTION_UNLIKELY_INTENT_NAME}, }, - {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},}, + {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"}}, { PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "mood_great"}, @@ -2271,14 +2271,14 @@ def test_prediction_states_keeps_action_intent_unlikely_intent_max_history_featu @pytest.mark.parametrize( "remove_duplicates, max_history", - [[True, None], [True, 2], [False, None], [False, 2],], + [[True, None], [True, 2], [False, None], [False, 2]], ) def test_multilabels_with_intent_max_history_tracker_featurizer( moodbot_domain: Domain, max_history: Optional[int], remove_duplicates: bool ): state_featurizer = IntentTokenizerSingleStateFeaturizer() tracker_featurizer = IntentMaxHistoryTrackerFeaturizer( - state_featurizer, max_history=max_history, remove_duplicates=remove_duplicates, + state_featurizer, max_history=max_history, remove_duplicates=remove_duplicates ) event_list1 = [ @@ -2303,7 +2303,7 @@ def test_multilabels_with_intent_max_history_tracker_featurizer( ) _1, actual_labels, _2 = tracker_featurizer.featurize_trackers( - [tracker1, tracker2], moodbot_domain, precomputations=None, + [tracker1, tracker2], moodbot_domain, precomputations=None ) greet_index = 5 @@ -2312,7 +2312,7 @@ def test_multilabels_with_intent_max_history_tracker_featurizer( if remove_duplicates: expected_labels = np.array( - [[greet_index, -1], [mood_great_index, mood_unhappy_index],] + [[greet_index, -1], [mood_great_index, mood_unhappy_index]] ) else: expected_labels = np.array( diff --git a/tests/core/nlg/test_response.py b/tests/core/nlg/test_response.py index d9f81f6fa16d..2952f75dbbca 100644 --- a/tests/core/nlg/test_response.py +++ b/tests/core/nlg/test_response.py @@ -390,7 +390,7 @@ async def test_nlg_conditional_response_variation_condition_met_channel_mismatch mappings=[{}], initial_value="B", influence_conversation=False, - ), + ) ], "app", "condition example B no channel", @@ -402,7 +402,7 @@ async def test_nlg_conditional_response_variation_condition_met_channel_mismatch mappings=[{}], initial_value="C", influence_conversation=False, - ), + ) ], "", "default", @@ -414,7 +414,7 @@ async def test_nlg_conditional_response_variation_condition_met_channel_mismatch mappings=[{}], initial_value="D", influence_conversation=False, - ), + ) ], "app", "default", diff --git a/tests/core/policies/test_rule_policy.py b/tests/core/policies/test_rule_policy.py index 3d42ee27346b..5d075642d4ed 100644 --- a/tests/core/policies/test_rule_policy.py +++ b/tests/core/policies/test_rule_policy.py @@ -43,11 +43,7 @@ FollowupAction, ) from rasa.core.nlg import TemplatedNaturalLanguageGenerator -from rasa.core.policies.rule_policy import ( - RulePolicy, - InvalidRule, - RULES, -) +from rasa.core.policies.rule_policy import RulePolicy, InvalidRule, RULES from rasa.graph_components.providers.rule_only_provider import RuleOnlyDataProvider from rasa.shared.core.trackers import DialogueStateTracker from rasa.shared.core.generator import TrackerWithCachedStates @@ -1075,7 +1071,7 @@ async def test_predict_slot_with_initial_slot_matches_rule(policy: 
RulePolicy): form_conversation = DialogueStateTracker.from_events( "slot rule test", - evts=[ActionExecuted(ACTION_LISTEN_NAME), UserUttered(intent={"name": "i1"}),], + evts=[ActionExecuted(ACTION_LISTEN_NAME), UserUttered(intent={"name": "i1"})], domain=domain, slots=domain.slots, ) @@ -1926,7 +1922,7 @@ async def test_one_stage_fallback_rule(policy: RulePolicy): is_rule_tracker=True, ) policy.train( - [greet_rule_which_only_applies_at_start, fallback_recover_rule], domain, + [greet_rule_which_only_applies_at_start, fallback_recover_rule], domain ) # RulePolicy predicts fallback action @@ -2544,9 +2540,7 @@ def test_do_not_hide_rule_turn_with_loops_in_stories(policy: RulePolicy): form_activation_story = form_activation_rule.copy() form_activation_story.is_rule_tracker = False - policy.train( - [form_activation_rule, form_activation_story], domain, - ) + policy.train([form_activation_rule, form_activation_story], domain) assert policy.lookup[RULE_ONLY_LOOPS] == [] conversation_events = [ @@ -2604,9 +2598,7 @@ def test_hide_rule_turn_with_loops_as_followup_action(policy: RulePolicy): form_activation_story = form_activation_rule.copy() form_activation_story.is_rule_tracker = False - policy.train( - [form_activation_rule, GREET_RULE, form_activation_story], domain, - ) + policy.train([form_activation_rule, GREET_RULE, form_activation_story], domain) assert policy.lookup[RULE_ONLY_LOOPS] == [] conversation_events = [ @@ -3001,7 +2993,7 @@ def test_rule_with_multiple_slots(policy: RulePolicy): evts=[ ActionExecuted(RULE_SNIPPET_ACTION_NAME), ActionExecuted(ACTION_LISTEN_NAME), - UserUttered(intent={"name": intent_1},), + UserUttered(intent={"name": intent_1}), SlotSet(slot_1, value_1), SlotSet(slot_2, value_2), ActionExecuted(utter_1), @@ -3014,7 +3006,7 @@ def test_rule_with_multiple_slots(policy: RulePolicy): # the order of slots set doesn't matter for prediction conversation_events = [ ActionExecuted(ACTION_LISTEN_NAME), - UserUttered("haha", intent={"name": intent_1},), + UserUttered("haha", intent={"name": intent_1}), SlotSet(slot_2, value_2), SlotSet(slot_1, value_1), ] @@ -3069,7 +3061,7 @@ def test_include_action_unlikely_intent(policy: RulePolicy): evts=[ ActionExecuted(RULE_SNIPPET_ACTION_NAME), ActionExecuted(ACTION_LISTEN_NAME), - UserUttered(intent={"name": intent_1},), + UserUttered(intent={"name": intent_1}), SlotSet(slot_1, value_1), SlotSet(slot_2, value_2), ActionExecuted(utter_1), @@ -3101,7 +3093,7 @@ def test_include_action_unlikely_intent(policy: RulePolicy): # ignore action_unlikely_intent. 
conversation_events = [ ActionExecuted(ACTION_LISTEN_NAME), - UserUttered("haha", intent={"name": intent_1},), + UserUttered("haha", intent={"name": intent_1}), SlotSet(slot_2, value_2), SlotSet(slot_1, value_1), ActionExecuted(ACTION_UNLIKELY_INTENT_NAME), @@ -3118,7 +3110,7 @@ def test_include_action_unlikely_intent(policy: RulePolicy): # anywhere else also triggers utter_2 conversation_events = [ ActionExecuted(ACTION_LISTEN_NAME), - UserUttered("dummy", intent={"name": intent_2},), + UserUttered("dummy", intent={"name": intent_2}), ActionExecuted(ACTION_UNLIKELY_INTENT_NAME), ] prediction = policy.predict_action_probabilities( diff --git a/tests/core/policies/test_ted_policy.py b/tests/core/policies/test_ted_policy.py index 4d0d8cd90a6a..09d4d63ec887 100644 --- a/tests/core/policies/test_ted_policy.py +++ b/tests/core/policies/test_ted_policy.py @@ -104,7 +104,7 @@ def test_diagnostics( precomputations = None policy.train([GREET_RULE], domain, precomputations) prediction = policy.predict_action_probabilities( - GREET_RULE, domain, precomputations, + GREET_RULE, domain, precomputations ) assert prediction.diagnostic_data @@ -226,9 +226,7 @@ def test_train_fails_with_checkpoint_zero_eval_num_epochs(self, tmp_path: Path): "Only values either equal to -1 or greater" " than 0 are allowed for this parameter." ) - with pytest.raises( - InvalidConfigException, match=match_string, - ): + with pytest.raises(InvalidConfigException, match=match_string): train_core( domain="data/test_domains/default.yml", stories="data/test_yaml_stories/stories_defaultdomain.yml", @@ -289,7 +287,7 @@ def test_ranking_length_and_renormalization( ): precomputations = None prediction = trained_policy.predict_action_probabilities( - tracker, default_domain, precomputations, + tracker, default_domain, precomputations ) # first check the output is what we expect @@ -331,7 +329,7 @@ def test_label_data_assembly( assert assembled_label_data.num_examples == default_domain.num_actions assert list( assembled_label_data_signature[f"{LABEL}_{ACTION_NAME}"].keys() - ) == [MASK, SENTENCE,] + ) == [MASK, SENTENCE] assert list(assembled_label_data_signature[LABEL].keys()) == [IDS] assert ( assembled_label_data_signature[f"{LABEL}_{ACTION_NAME}"][SENTENCE][0].units @@ -346,7 +344,7 @@ def test_gen_batch( ) precomputations = None training_data, label_ids, entity_tags = trained_policy._featurize_for_training( - training_trackers, default_domain, precomputations, + training_trackers, default_domain, precomputations ) _, all_labels = trained_policy._create_label_data( @@ -499,14 +497,14 @@ def test_gen_batch( [ ActionExecuted(ACTION_LISTEN_NAME), UserUttered(text="hello", intent={"name": "greet"}), - EntitiesAdded(entities=[{"entity": "name", "value": "Peter"},]), + EntitiesAdded(entities=[{"entity": "name", "value": "Peter"}]), ActionExecuted(ACTION_UNLIKELY_INTENT_NAME), ActionExecuted("utter_greet"), ], [ ActionExecuted(ACTION_LISTEN_NAME), UserUttered(text="hello", intent={"name": "greet"}), - EntitiesAdded(entities=[{"entity": "name", "value": "Peter"},]), + EntitiesAdded(entities=[{"entity": "name", "value": "Peter"}]), ActionExecuted("utter_greet"), ], ), @@ -548,10 +546,10 @@ def test_ignore_action_unlikely_intent( "test 2", evts=tracker_events_without_action ) prediction_with_action = trained_policy.predict_action_probabilities( - tracker_with_action, default_domain, precomputations, + tracker_with_action, default_domain, precomputations ) prediction_without_action = trained_policy.predict_action_probabilities( - 
tracker_without_action, default_domain, precomputations, + tracker_without_action, default_domain, precomputations ) # If the weights didn't change then both trackers @@ -625,7 +623,7 @@ def test_ranking_length_and_renormalization( default_domain: Domain, ): policy_prediction = trained_policy.predict_action_probabilities( - tracker, default_domain, precomputations=None, + tracker, default_domain, precomputations=None ) assert sum(policy_prediction.probabilities) != pytest.approx(1) @@ -634,7 +632,7 @@ def test_prediction_on_empty_tracker( ): tracker = DialogueStateTracker(DEFAULT_SENDER_ID, default_domain.slots) prediction = trained_policy.predict_action_probabilities( - tracker, default_domain, precomputations=None, + tracker, default_domain, precomputations=None ) assert not prediction.is_end_to_end_prediction assert len(prediction.probabilities) == default_domain.num_actions @@ -678,7 +676,7 @@ def test_ranking_length_and_renormalization( ): precomputations = None predicted_probabilities = trained_policy.predict_action_probabilities( - tracker, default_domain, precomputations, + tracker, default_domain, precomputations ).probabilities assert all([confidence >= 0 for confidence in predicted_probabilities]) assert sum([confidence > 0 for confidence in predicted_probabilities]) == 4 @@ -690,11 +688,7 @@ def _config( self, config_override: Optional[Dict[Text, Any]] = None ) -> Dict[Text, Any]: config_override = config_override or {} - return { - **TEDPolicy.get_default_config(), - RANKING_LENGTH: 3, - **config_override, - } + return {**TEDPolicy.get_default_config(), RANKING_LENGTH: 3, **config_override} def test_ranking_length(self, trained_policy: TEDPolicy): assert trained_policy.config[RANKING_LENGTH] == 3 @@ -705,11 +699,7 @@ def _config( self, config_override: Optional[Dict[Text, Any]] = None ) -> Dict[Text, Any]: config_override = config_override or {} - return { - **TEDPolicy.get_default_config(), - RANKING_LENGTH: 11, - **config_override, - } + return {**TEDPolicy.get_default_config(), RANKING_LENGTH: 11, **config_override} def test_ranking_length(self, trained_policy: TEDPolicy): assert trained_policy.config[RANKING_LENGTH] == 11 @@ -720,10 +710,7 @@ def _config( self, config_override: Optional[Dict[Text, Any]] = None ) -> Dict[Text, Any]: config_override = config_override or {} - return { - **TEDPolicy.get_default_config(), - **config_override, - } + return {**TEDPolicy.get_default_config(), **config_override} def create_policy( self, diff --git a/tests/core/policies/test_unexpected_intent_policy.py b/tests/core/policies/test_unexpected_intent_policy.py index 5e220de03aa5..07796d2d8daa 100644 --- a/tests/core/policies/test_unexpected_intent_policy.py +++ b/tests/core/policies/test_unexpected_intent_policy.py @@ -88,7 +88,7 @@ def test_ranking_length_and_renormalization( ): precomputations = None prediction_metadata = trained_policy.predict_action_probabilities( - tracker, default_domain, precomputations, + tracker, default_domain, precomputations ).action_metadata assert ( prediction_metadata is None @@ -171,7 +171,7 @@ def test_prepared_data_for_threshold_prediction( default_domain, stories_path, augmentation_factor=0 ) training_model_data, _ = trained_policy._prepare_for_training( - training_trackers, default_domain, precomputations=None, + training_trackers, default_domain, precomputations=None ) data_for_prediction = trained_policy._prepare_data_for_prediction( @@ -184,7 +184,7 @@ def test_similarities_collection_for_label_ids(self): label_ids = np.array([[0, 1], [1, -1], [2, 
-1]]) outputs = { "similarities": np.array( - [[[1.2, 0.3, 0.2]], [[0.5, 0.2, 1.6]], [[0.01, 0.1, 1.7]],] + [[[1.2, 0.3, 0.2]], [[0.5, 0.2, 1.6]], [[0.01, 0.1, 1.7]]] ) } label_id_similarities = UnexpecTEDIntentPolicy._collect_label_id_grouped_scores( @@ -313,7 +313,7 @@ def test_post_training_threshold_computation( default_domain, stories_path, augmentation_factor=0 ) training_model_data, label_ids = trained_policy._prepare_for_training( - training_trackers, default_domain, precomputations=None, + training_trackers, default_domain, precomputations=None ) trained_policy.compute_label_quantiles_post_training( @@ -543,7 +543,7 @@ def test_action_unlikely_intent_prediction( tracker = DialogueStateTracker(sender_id="init", slots=default_domain.slots) tracker.update_with_events( - [UserUttered(text="hello", intent={"name": query_intent})], default_domain, + [UserUttered(text="hello", intent={"name": query_intent})], default_domain ) # Preset the model predictions to the similarity values @@ -699,7 +699,7 @@ def test_skip_predictions_if_new_intent( with caplog.at_level(logging.DEBUG): prediction = loaded_policy.predict_action_probabilities( - tracker, default_domain, precomputations=None, + tracker, default_domain, precomputations=None ) assert "Skipping predictions for UnexpecTEDIntentPolicy" in caplog.text @@ -730,7 +730,7 @@ def test_skip_predictions_if_new_intent( [ ActionExecuted(ACTION_LISTEN_NAME), UserUttered(text="hello", intent={"name": "greet"}), - EntitiesAdded(entities=[{"entity": "name", "value": "Peter"},]), + EntitiesAdded(entities=[{"entity": "name", "value": "Peter"}]), ActionExecuted(ACTION_UNLIKELY_INTENT_NAME), ActionExecuted("utter_greet"), UserUttered(text="sad", intent={"name": "thank_you"}), @@ -738,7 +738,7 @@ def test_skip_predictions_if_new_intent( [ ActionExecuted(ACTION_LISTEN_NAME), UserUttered(text="hello", intent={"name": "greet"}), - EntitiesAdded(entities=[{"entity": "name", "value": "Peter"},]), + EntitiesAdded(entities=[{"entity": "name", "value": "Peter"}]), ActionExecuted("utter_greet"), UserUttered(text="sad", intent={"name": "thank_you"}), ], @@ -964,7 +964,7 @@ def test_individual_label_metadata( ), ActionExecuted("utter_goodbye"), ActionExecuted(ACTION_LISTEN_NAME), - ], + ] ], ), # Filter because of no action name @@ -1004,7 +1004,7 @@ def test_individual_label_metadata( ), ActionExecuted("utter_goodbye"), ActionExecuted(ACTION_LISTEN_NAME), - ], + ] ], ), # Filter because of no intent @@ -1042,7 +1042,7 @@ def test_individual_label_metadata( ), ActionExecuted("utter_goodbye"), ActionExecuted(ACTION_LISTEN_NAME), - ], + ] ], ), # No filter needed @@ -1058,7 +1058,7 @@ def test_individual_label_metadata( ), ActionExecuted("utter_goodbye"), ActionExecuted(ACTION_LISTEN_NAME), - ], + ] ], [ [ @@ -1071,7 +1071,7 @@ def test_individual_label_metadata( ), ActionExecuted("utter_goodbye"), ActionExecuted(ACTION_LISTEN_NAME), - ], + ] ], ), # Filter to return empty list of trackers @@ -1087,7 +1087,7 @@ def test_individual_label_metadata( ), ActionExecuted(action_text="Great!"), ActionExecuted(ACTION_LISTEN_NAME), - ], + ] ], [], ), @@ -1155,7 +1155,7 @@ def test_filter_training_trackers( UserUttered(text="happy to make it work"), ActionExecuted(action_text="Great!"), ActionExecuted(ACTION_LISTEN_NAME), - ], + ] ], True, ), @@ -1169,7 +1169,7 @@ def test_filter_training_trackers( UserUttered(text="happy to make it work"), ActionExecuted("utter_goodbye"), ActionExecuted(ACTION_LISTEN_NAME), - ], + ] ], True, ), @@ -1185,7 +1185,7 @@ def 
test_filter_training_trackers( ), ActionExecuted(action_text="Great!"), ActionExecuted(ACTION_LISTEN_NAME), - ], + ] ], True, ), diff --git a/tests/core/test_actions.py b/tests/core/test_actions.py index 2b695de04411..d82e81e89bbb 100644 --- a/tests/core/test_actions.py +++ b/tests/core/test_actions.py @@ -372,7 +372,7 @@ async def test_remote_action_utterances_with_none_values( UserUttered( text="hello", parse_data={ - "intent": {"name": "greet", "confidence": 0.9604260921478271,}, + "intent": {"name": "greet", "confidence": 0.9604260921478271}, "entities": [ {"entity": "city", "value": "London"}, {"entity": "count", "value": 1}, @@ -381,14 +381,14 @@ async def test_remote_action_utterances_with_none_values( "message_id": "3f4c04602a4947098c574b107d3ccc50", "metadata": {}, "intent_ranking": [ - {"name": "greet", "confidence": 0.9604260921478271,}, - {"name": "goodbye", "confidence": 0.01835782080888748,}, - {"name": "deny", "confidence": 0.011255578137934208,}, - {"name": "bot_challenge", "confidence": 0.004019865766167641,}, - {"name": "affirm", "confidence": 0.002524246694520116,}, - {"name": "mood_great", "confidence": 0.002214624546468258,}, - {"name": "chitchat", "confidence": 0.0009614597074687481,}, - {"name": "mood_unhappy", "confidence": 0.00024030178610701114,}, + {"name": "greet", "confidence": 0.9604260921478271}, + {"name": "goodbye", "confidence": 0.01835782080888748}, + {"name": "deny", "confidence": 0.011255578137934208}, + {"name": "bot_challenge", "confidence": 0.004019865766167641}, + {"name": "affirm", "confidence": 0.002524246694520116}, + {"name": "mood_great", "confidence": 0.002214624546468258}, + {"name": "chitchat", "confidence": 0.0009614597074687481}, + {"name": "mood_unhappy", "confidence": 0.00024030178610701114}, ], "response_selector": { "all_retrieval_intents": [], @@ -465,10 +465,7 @@ async def test_remote_action_invalid_entities_payload( endpoint = EndpointConfig("https://example.com/webhooks/actions") remote_action = action.RemoteAction("my_action", endpoint) - response = { - "events": [event], - "responses": [], - } + response = {"events": [event], "responses": []} with aioresponses() as mocked: mocked.post("https://example.com/webhooks/actions", payload=response) @@ -785,8 +782,7 @@ async def test_action_restart( assert events == [ BotUttered( - "congrats, you've restarted me!", - metadata={"utter_action": "utter_restart"}, + "congrats, you've restarted me!", metadata={"utter_action": "utter_restart"} ), Restarted(), ] @@ -1139,14 +1135,14 @@ async def test_run_end_to_end_utterance_action(): ["Bob", "Mary"], UserUttered( intent={"name": "inform"}, - entities=[{"entity": "name", "value": "John"},], + entities=[{"entity": "name", "value": "John"}], ), ["John"], ), ], ) async def test_action_extract_slots_predefined_mappings( - user: Event, slot_name: Text, slot_value: Any, new_user: Event, updated_value: Any, + user: Event, slot_name: Text, slot_value: Any, new_user: Event, updated_value: Any ): domain = Domain.from_yaml( textwrap.dedent( @@ -1342,7 +1338,7 @@ async def test_action_extract_slots_when_mapping_applies( ["mushrooms", "kebab"], ), # Only one entity was extracted for `ListSlot` - ([{"entity": "topping", "value": "kebab"},], ["kebab"],), + ([{"entity": "topping", "value": "kebab"}], ["kebab"]), ], ) async def test_action_extract_slots_with_list_slot( @@ -1381,7 +1377,7 @@ async def test_action_extract_slots_with_list_slot( ActiveLoop(form_name), SlotSet(REQUESTED_SLOT, slot_name), UserUttered( - "bla", intent={"name": "greet", "confidence": 
1.0}, entities=entities, + "bla", intent={"name": "greet", "confidence": 1.0}, entities=entities ), ActionExecuted(ACTION_LISTEN_NAME), ], @@ -1484,7 +1480,7 @@ async def test_action_extract_slots_with_matched_mapping_condition(): ActiveLoop(form_name), SlotSet(REQUESTED_SLOT, "name"), UserUttered( - "Emily", intent={"name": "inform", "confidence": 1.0}, entities=[], + "Emily", intent={"name": "inform", "confidence": 1.0}, entities=[] ), ], ) @@ -1699,7 +1695,7 @@ async def test_action_extract_slots_from_entity( domain = Domain.from_dict( { "entities": ["some_entity"], - "slots": {"some_slot": {"type": "any", "mappings": [mapping],}}, + "slots": {"some_slot": {"type": "any", "mappings": [mapping]}}, "forms": {form_name: {REQUIRED_SLOTS_KEY: ["some_slot"]}}, } ) @@ -1739,7 +1735,7 @@ async def test_action_extract_slots_from_entity( ["mushrooms", "kebab"], ), # Only one entity was extracted for `ListSlot` - ([{"entity": "topping", "value": "kebab"},], ["kebab"],), + ([{"entity": "topping", "value": "kebab"}], ["kebab"]), ], ) async def test_extract_other_list_slot_from_entity( @@ -1840,7 +1836,7 @@ async def test_trigger_slot_mapping_applies( }, "forms": { form_name: { - REQUIRED_SLOTS_KEY: [entity_name, slot_filled_by_trigger_mapping,] + REQUIRED_SLOTS_KEY: [entity_name, slot_filled_by_trigger_mapping] } }, } @@ -1903,7 +1899,7 @@ async def test_trigger_slot_mapping_does_not_apply(trigger_slot_mapping: Dict): }, "forms": { form_name: { - REQUIRED_SLOTS_KEY: [entity_name, slot_filled_by_trigger_mapping,] + REQUIRED_SLOTS_KEY: [entity_name, slot_filled_by_trigger_mapping] } }, } @@ -1960,7 +1956,7 @@ async def test_trigger_slot_mapping_does_not_apply(trigger_slot_mapping: Dict): [{"event": "slot", "name": "custom_slot", "value": True}], [SlotSet("custom_slot", True)], ), - (UserUttered("bla"), [], [],), + (UserUttered("bla"), [], []), ], ) async def test_action_extract_slots_execute_validation_action( @@ -2063,7 +2059,7 @@ async def test_action_extract_slots_custom_action_and_predefined_slot_validation ) domain = Domain.from_yaml(domain_yaml) event = UserUttered( - intent={"name": "inform"}, entities=[{"entity": "city", "value": "london"}], + intent={"name": "inform"}, entities=[{"entity": "city", "value": "london"}] ) tracker = DialogueStateTracker.from_events(sender_id="test_id", evts=[event]) @@ -2233,7 +2229,7 @@ async def test_action_extract_slots_disallowed_events(caplog: LogCaptureFixture) ], ) async def test_action_extract_slots_warns_custom_action_exceptions( - caplog: LogCaptureFixture, exception: Exception, + caplog: LogCaptureFixture, exception: Exception ): domain_yaml = textwrap.dedent( """ @@ -2258,9 +2254,7 @@ async def test_action_extract_slots_warns_custom_action_exceptions( action_server_url = "http:/my-action-server:5055/webhook" with aioresponses() as mocked: - mocked.post( - action_server_url, exception=exception, - ) + mocked.post(action_server_url, exception=exception) action_server = EndpointConfig(action_server_url) action_extract_slots = ActionExtractSlots(action_server) @@ -2561,7 +2555,7 @@ async def test_action_extract_slots_does_not_raise_disallowed_warning_for_slot_e action_server_url, payload={ "events": [ - {"event": "slot", "name": "custom_slot_a", "value": "test_A"}, + {"event": "slot", "name": "custom_slot_a", "value": "test_A"} ] }, ) @@ -2570,7 +2564,7 @@ async def test_action_extract_slots_does_not_raise_disallowed_warning_for_slot_e action_server_url, payload={ "events": [ - {"event": "slot", "name": "custom_slot_b", "value": "test_B"}, + {"event": 
"slot", "name": "custom_slot_b", "value": "test_B"} ] }, ) diff --git a/tests/core/test_agent.py b/tests/core/test_agent.py index 79a7a7d8d2a9..724057367ccd 100644 --- a/tests/core/test_agent.py +++ b/tests/core/test_agent.py @@ -102,7 +102,7 @@ async def test_agent_train(default_agent: Agent): } ], }, - ), + ) ], ) async def test_agent_parse_message( @@ -237,7 +237,7 @@ async def test_agent_handle_message_full_model(default_agent: Agent): ActionExecuted(action_name="action_session_start"), SessionStarted(), ActionExecuted(action_name="action_listen"), - UserUttered(text="hello", intent={"name": "greet"},), + UserUttered(text="hello", intent={"name": "greet"}), DefinePrevUserUtteredFeaturization(False), ActionExecuted(action_name="utter_greet"), BotUttered( @@ -273,7 +273,7 @@ async def test_agent_handle_message_only_nlu(trained_nlu_model: Text): ActionExecuted(action_name="action_session_start"), SessionStarted(), ActionExecuted(action_name="action_listen"), - UserUttered(text="hello", intent={"name": "greet"},), + UserUttered(text="hello", intent={"name": "greet"}), ], model_id, ) @@ -294,7 +294,7 @@ async def test_agent_handle_message_only_core(trained_core_model: Text): ActionExecuted(action_name="action_session_start"), SessionStarted(), ActionExecuted(action_name="action_listen"), - UserUttered(text="/greet", intent={"name": "greet"},), + UserUttered(text="/greet", intent={"name": "greet"}), DefinePrevUserUtteredFeaturization(False), ActionExecuted(action_name="utter_greet"), BotUttered( diff --git a/tests/core/test_ensemble.py b/tests/core/test_ensemble.py index 3502d085fadd..aa7354c8b767 100644 --- a/tests/core/test_ensemble.py +++ b/tests/core/test_ensemble.py @@ -31,7 +31,7 @@ def test_default_predict_complains_if_no_predictions_given( default_ensemble: DefaultPolicyPredictionEnsemble, ): domain = Domain.load("data/test_domains/default.yml") - tracker = DialogueStateTracker.from_events(sender_id="arbitrary", evts=[],) + tracker = DialogueStateTracker.from_events(sender_id="arbitrary", evts=[]) with pytest.raises(InvalidConfigException): default_ensemble.combine_predictions_from_kwargs(domain=domain, tracker=tracker) @@ -40,7 +40,7 @@ def test_default_predict_ignores_other_kwargs( default_ensemble: DefaultPolicyPredictionEnsemble, ): domain = Domain.load("data/test_domains/default.yml") - tracker = DialogueStateTracker.from_events(sender_id="arbitrary", evts=[],) + tracker = DialogueStateTracker.from_events(sender_id="arbitrary", evts=[]) prediction = PolicyPrediction( policy_name="arbitrary", probabilities=[1.0], policy_priority=1 ) diff --git a/tests/core/test_evaluation.py b/tests/core/test_evaluation.py index 9e138c47272b..751ffb305b10 100644 --- a/tests/core/test_evaluation.py +++ b/tests/core/test_evaluation.py @@ -311,7 +311,7 @@ def test_event_has_proper_implementation( ) async def test_retrieval_intent(response_selector_agent: Agent, test_file: Text): generator = _create_data_generator( - test_file, response_selector_agent, use_conversation_test_files=True, + test_file, response_selector_agent, use_conversation_test_files=True ) test_stories = generator.generate_story_trackers() @@ -451,7 +451,7 @@ async def test_e2e_with_entity_evaluation(e2e_bot_agent: Agent, tmp_path: Path): "with_warnings": 0, }, }, - ], + ] ], ) async def test_story_report( @@ -475,9 +475,7 @@ async def test_story_report( assert actual_results == expected_results -async def test_story_report_with_empty_stories( - tmpdir: Path, core_agent: Agent, -) -> None: +async def 
test_story_report_with_empty_stories(tmpdir: Path, core_agent: Agent) -> None: stories_path = tmpdir / "stories.yml" stories_path.write_text("", "utf8") out_directory = tmpdir / "results" @@ -494,12 +492,12 @@ async def test_story_report_with_empty_stories( @pytest.mark.parametrize( "skip_field,skip_value", [ - [None, None,], - ["precision", None,], - ["f1", None,], - ["in_training_data_fraction", None,], - ["report", None,], - ["include_report", False,], + [None, None], + ["precision", None], + ["f1", None], + ["in_training_data_fraction", None], + ["report", None], + ["include_report", False], ], ) async def test_log_evaluation_table(caplog, skip_field, skip_value): @@ -618,7 +616,7 @@ async def test_wrong_predictions_with_intent_and_entities( async def test_failed_entity_extraction_comment( - tmpdir: Path, restaurantbot_agent: Agent, + tmpdir: Path, restaurantbot_agent: Agent ): test_file = "data/test_yaml_stories/test_failed_entity_extraction_comment.yml" stories_path = str(tmpdir / FAILED_STORIES_FILE) diff --git a/tests/core/test_migrate.py b/tests/core/test_migrate.py index b80cff9b6c95..071088982754 100644 --- a/tests/core/test_migrate.py +++ b/tests/core/test_migrate.py @@ -112,7 +112,7 @@ def test_migrate_domain_format_with_required_slots( "type": "from_text", "intent": "inform", "conditions": [ - {"active_loop": "booking_form", "requested_slot": "email"}, + {"active_loop": "booking_form", "requested_slot": "email"} ], } ], @@ -215,7 +215,7 @@ def test_migrate_domain_form_without_required_slots( "type": "from_text", "intent": "inform", "conditions": [ - {"active_loop": "booking_form", "requested_slot": "email"}, + {"active_loop": "booking_form", "requested_slot": "email"} ], } ], @@ -286,11 +286,11 @@ def test_migrate_domain_with_diff_slot_types( { "active_loop": "reservation_form", "requested_slot": "outdoor_seating", - }, + } ], } ], - }, + } } assert migrated_slots == expected_slots @@ -583,7 +583,7 @@ def test_migrate_domain_format_duplicated_slots_in_forms( "type": "from_text", "intent": "greet", "conditions": [ - {"active_loop": "form_one", "requested_slot": "location"}, + {"active_loop": "form_one", "requested_slot": "location"} ], }, { @@ -642,7 +642,7 @@ def test_migrate_domain_dir_with_out_path_as_file(tmp_path: Path): assert domain -def test_migrate_domain_multiple_files_with_duplicate_slots(tmp_path: Path,): +def test_migrate_domain_multiple_files_with_duplicate_slots(tmp_path: Path): domain_dir = tmp_path / "domain" domain_dir.mkdir() @@ -833,7 +833,7 @@ def test_migrate_domain_raises_for_non_domain_files(tmp_path: Path): rasa.core.migrate.migrate_domain_format(domain_dir, domain_dir) -def test_migration_cleanup(tmp_path: Path,): +def test_migration_cleanup(tmp_path: Path): domain_dir = tmp_path / "domain" domain_dir.mkdir() migrated_domain_dir = tmp_path / "domain2" diff --git a/tests/core/test_policies.py b/tests/core/test_policies.py index 6045f0147c84..137129379227 100644 --- a/tests/core/test_policies.py +++ b/tests/core/test_policies.py @@ -12,10 +12,7 @@ from rasa.engine.storage.resource import Resource from rasa.engine.storage.storage import ModelStorage from rasa.shared.constants import DEFAULT_SENDER_ID -from rasa.shared.core.constants import ( - ACTION_LISTEN_NAME, - ACTION_UNLIKELY_INTENT_NAME, -) +from rasa.shared.core.constants import ACTION_LISTEN_NAME, ACTION_UNLIKELY_INTENT_NAME from rasa.shared.core.domain import Domain from rasa.shared.core.events import ( ActionExecuted, @@ -35,17 +32,10 @@ SingleStateFeaturizer, 
IntentTokenizerSingleStateFeaturizer, ) -from rasa.core.policies.policy import ( - SupportedData, - InvalidPolicyConfig, - Policy, -) +from rasa.core.policies.policy import SupportedData, InvalidPolicyConfig, Policy from rasa.core.policies.rule_policy import RulePolicy from rasa.core.policies.ted_policy import TEDPolicy -from rasa.core.policies.memoization import ( - AugmentedMemoizationPolicy, - MemoizationPolicy, -) +from rasa.core.policies.memoization import AugmentedMemoizationPolicy, MemoizationPolicy from rasa.shared.core.trackers import DialogueStateTracker from rasa.shared.core.generator import TrackerWithCachedStates @@ -80,7 +70,7 @@ def _policy_class_to_test() -> Type[Policy]: max_history = 3 # this is the amount of history we test on @pytest.fixture(scope="class") - def resource(self,) -> Resource: + def resource(self) -> Resource: return Resource(uuid.uuid4().hex) @pytest.fixture(scope="class") @@ -210,7 +200,7 @@ def test_prediction_on_empty_tracker( ): tracker = DialogueStateTracker(DEFAULT_SENDER_ID, default_domain.slots) prediction = trained_policy.predict_action_probabilities( - tracker, default_domain, + tracker, default_domain ) assert not prediction.is_end_to_end_prediction assert len(prediction.probabilities) == default_domain.num_actions @@ -228,12 +218,12 @@ def test_persist_and_load_empty_policy( ): resource = Resource(uuid.uuid4().hex) empty_policy = self.create_policy( - None, default_model_storage, resource, execution_context, + None, default_model_storage, resource, execution_context ) empty_policy.train([], default_domain) loaded = empty_policy.__class__.load( - self._config(), default_model_storage, resource, execution_context, + self._config(), default_model_storage, resource, execution_context ) assert loaded is not None @@ -241,7 +231,7 @@ def test_persist_and_load_empty_policy( @staticmethod def _get_next_action(policy: Policy, events: List[Event], domain: Domain) -> Text: tracker = get_tracker(events) - scores = policy.predict_action_probabilities(tracker, domain,).probabilities + scores = policy.predict_action_probabilities(tracker, domain).probabilities index = scores.index(max(scores)) return domain.action_names_or_texts[index] @@ -458,9 +448,7 @@ def test_finetune_after_load( default_domain, stories_path, augmentation_factor=20 ) - loaded_policy.train( - original_train_data + [new_story], default_domain, - ) + loaded_policy.train(original_train_data + [new_story], default_domain) # Get the hash of the tracker state of new story new_story_states, _ = loaded_policy.featurizer.training_states_and_labels( @@ -517,10 +505,10 @@ def test_ignore_action_unlikely_intent( "test 2", evts=tracker_events_without_action, slots=default_domain.slots ) prediction_with_action = trained_policy.predict_action_probabilities( - tracker_with_action, default_domain, + tracker_with_action, default_domain ) prediction_without_action = trained_policy.predict_action_probabilities( - tracker_without_action, default_domain, + tracker_without_action, default_domain ) # Memoization shouldn't be affected with the @@ -628,10 +616,10 @@ def test_prediction( ActionExecuted(UTTER_BYE_ACTION), ] training_story = TrackerWithCachedStates.from_events( - "training story", evts=events, domain=domain, slots=domain.slots, + "training story", evts=events, domain=domain, slots=domain.slots ) test_story = TrackerWithCachedStates.from_events( - "training story", events[:-1], domain=domain, slots=domain.slots, + "training story", events[:-1], domain=domain, slots=domain.slots ) 
policy.train([training_story], domain) prediction = policy.predict_action_probabilities(test_story, domain) diff --git a/tests/core/test_processor.py b/tests/core/test_processor.py index 3a6ebb6d9a64..46a4b1e657ca 100644 --- a/tests/core/test_processor.py +++ b/tests/core/test_processor.py @@ -590,7 +590,7 @@ async def test_update_tracker_session( async def test_update_tracker_session_with_metadata( - default_processor: MessageProcessor, monkeypatch: MonkeyPatch, + default_processor: MessageProcessor, monkeypatch: MonkeyPatch ): model_id = default_processor.model_metadata.model_id sender_id = uuid.uuid4().hex @@ -607,7 +607,7 @@ async def test_update_tracker_session_with_metadata( events = list(tracker.events) assert events[0] == with_model_id( - SlotSet(SESSION_START_METADATA_SLOT, message_metadata,), model_id + SlotSet(SESSION_START_METADATA_SLOT, message_metadata), model_id ) assert tracker.slots[SESSION_START_METADATA_SLOT].value == message_metadata @@ -706,10 +706,7 @@ async def test_update_tracker_session_with_slots( assert events[2:7] == slot_set_events # the next two events are the session start sequence - assert events[7:9] == [ - ActionExecuted(ACTION_SESSION_START_NAME), - SessionStarted(), - ] + assert events[7:9] == [ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted()] assert events[9:14] == slot_set_events # finally an action listen, this should also be the last event @@ -876,7 +873,7 @@ async def test_handle_message_with_session_start( SlotSet(entity, slot_1[entity]), DefinePrevUserUtteredFeaturization(False), ActionExecuted("utter_greet"), - BotUttered("hey there Core!", metadata={"utter_action": "utter_greet"},), + BotUttered("hey there Core!", metadata={"utter_action": "utter_greet"}), ActionExecuted(ACTION_LISTEN_NAME), ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted(), @@ -932,7 +929,7 @@ async def test_should_predict_another_action( async def test_action_unlikely_intent_metadata(default_processor: MessageProcessor): tracker = DialogueStateTracker.from_events( - "some-sender", evts=[ActionExecuted(ACTION_LISTEN_NAME),], + "some-sender", evts=[ActionExecuted(ACTION_LISTEN_NAME)] ) domain = Domain.empty() metadata = {"key1": 1, "key2": "2"} @@ -1002,7 +999,7 @@ async def test_restart_triggers_session_start( SlotSet(entity, slot_1[entity]), DefinePrevUserUtteredFeaturization(use_text_for_featurization=False), ActionExecuted("utter_greet"), - BotUttered("hey there name1!", metadata={"utter_action": "utter_greet"},), + BotUttered("hey there name1!", metadata={"utter_action": "utter_greet"}), ActionExecuted(ACTION_LISTEN_NAME), UserUttered("/restart", {INTENT_NAME_KEY: "restart", "confidence": 1.0}), DefinePrevUserUtteredFeaturization(use_text_for_featurization=False), @@ -1165,7 +1162,7 @@ async def mocked_run(*args: Any, **kwargs: Any) -> List[Event]: ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted(), ActionExecuted(ACTION_LISTEN_NAME), - UserUttered(user_message, intent={"name": "greet"},), + UserUttered(user_message, intent={"name": "greet"}), ActionExecutionRejected(ACTION_LISTEN_NAME), ], model_id, @@ -1175,7 +1172,7 @@ async def mocked_run(*args: Any, **kwargs: Any) -> List[Event]: async def test_logging_of_end_to_end_action( - default_processor: MessageProcessor, monkeypatch: MonkeyPatch, + default_processor: MessageProcessor, monkeypatch: MonkeyPatch ): model_id = default_processor.model_metadata.model_id end_to_end_action = "hi, how are you?" 
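The hunks on either side of this point all make the same change: a trailing comma is removed from a call or signature whose arguments fit on one line. This looks like Black's "magic trailing comma" rule at work (an inference from the diff; the patch itself does not say so): when an argument list or collection already ends in a trailing comma, Black keeps it exploded across lines, and removing that comma lets Black collapse it. A minimal, self-contained sketch of that behavior — `greet` and its arguments are hypothetical, not taken from this patch:

    def greet(first_name, last_name):
        return f"Hi {first_name} {last_name}!"

    # With a magic trailing comma, Black keeps the call exploded:
    message = greet(
        "Ada", "Lovelace",
    )

    # Without the trailing comma, Black collapses the call onto one line:
    message = greet("Ada", "Lovelace")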
@@ -1228,7 +1225,7 @@ def combine_predictions( ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted(), ActionExecuted(ACTION_LISTEN_NAME), - UserUttered(user_message, intent={"name": "greet"},), + UserUttered(user_message, intent={"name": "greet"}), ActionExecuted(action_text=end_to_end_action), BotUttered("hi, how are you?", {}, {}, 123), ActionExecuted(ACTION_LISTEN_NAME), @@ -1481,7 +1478,7 @@ def test_get_tracker_adds_model_id(default_processor: MessageProcessor): async def test_processor_e2e_slot_set(e2e_bot_agent: Agent, caplog: LogCaptureFixture): processor = e2e_bot_agent.processor - message = UserMessage("I am feeling sad.", CollectingOutputChannel(), "test",) + message = UserMessage("I am feeling sad.", CollectingOutputChannel(), "test") with caplog.at_level(logging.DEBUG): await processor.handle_message(message) diff --git a/tests/core/test_test.py b/tests/core/test_test.py index 134df290d886..b9d275c35f11 100644 --- a/tests/core/test_test.py +++ b/tests/core/test_test.py @@ -40,7 +40,7 @@ def _probabilities_with_action_unlikely_intent_for( _original = DefaultPolicyPredictionEnsemble.combine_predictions_from_kwargs def combine_predictions_from_kwargs( - self, tracker: DialogueStateTracker, domain: Domain, **kwargs: Any, + self, tracker: DialogueStateTracker, domain: Domain, **kwargs: Any ) -> PolicyPrediction: latest_event = tracker.events[-1] if ( @@ -126,9 +126,7 @@ async def test_testing_warns_if_action_unknown( async def test_testing_with_utilizing_retrieval_intents( - response_selector_agent: Agent, - response_selector_test_stories: Path, - tmp_path: Path, + response_selector_agent: Agent, response_selector_test_stories: Path, tmp_path: Path ): result = await rasa.core.test.test( stories=response_selector_test_stories, @@ -522,7 +520,7 @@ async def test_action_unlikely_intent_warning_and_story_error( agent = await _train_rule_based_agent(train_file_name, True) result = await rasa.core.test.test( - str(test_file_name), agent, out_directory=str(tmp_path), + str(test_file_name), agent, out_directory=str(tmp_path) ) assert "report" in result.keys() assert result["report"]["conversation_accuracy"]["correct"] == 0 diff --git a/tests/core/test_tracker_stores.py b/tests/core/test_tracker_stores.py index 2905a81f813b..c08b4eeb8047 100644 --- a/tests/core/test_tracker_stores.py +++ b/tests/core/test_tracker_stores.py @@ -226,9 +226,7 @@ def test_raise_connection_exception_redis_tracker_store_creation( TrackerStore.create(store, domain) -def test_mongo_tracker_store_raise_exception( - domain: Domain, monkeypatch: MonkeyPatch, -): +def test_mongo_tracker_store_raise_exception(domain: Domain, monkeypatch: MonkeyPatch): monkeypatch.setattr( rasa.core.tracker_store, "MongoTrackerStore", @@ -593,7 +591,7 @@ def test_sql_additional_events_with_session_start(domain: Domain): [(MockedMongoTrackerStore, {}), (SQLTrackerStore, {"host": "sqlite:///"})], ) def test_tracker_store_retrieve_with_session_started_events( - tracker_store_type: Type[TrackerStore], tracker_store_kwargs: Dict, domain: Domain, + tracker_store_type: Type[TrackerStore], tracker_store_kwargs: Dict, domain: Domain ): tracker_store = tracker_store_type(domain, **tracker_store_kwargs) events = [ @@ -622,7 +620,7 @@ def test_tracker_store_retrieve_with_session_started_events( [(MockedMongoTrackerStore, {}), (SQLTrackerStore, {"host": "sqlite:///"})], ) def test_tracker_store_retrieve_without_session_started_events( - tracker_store_type: Type[TrackerStore], tracker_store_kwargs: Dict, domain, + tracker_store_type: 
Type[TrackerStore], tracker_store_kwargs: Dict, domain ): tracker_store = tracker_store_type(domain, **tracker_store_kwargs) @@ -871,7 +869,7 @@ def test_login_db_with_no_postgresql(tmp_path: Path): "type": "mongod", "url": "mongodb://0.0.0.0:42/?serverSelectionTimeoutMS=5000", }, - {"type": "dynamo",}, + {"type": "dynamo"}, ], ) def test_tracker_store_connection_error(config: Dict, domain: Domain): @@ -882,7 +880,7 @@ def test_tracker_store_connection_error(config: Dict, domain: Domain): async def prepare_token_serialisation( - tracker_store: TrackerStore, response_selector_agent: Agent, sender_id: Text, + tracker_store: TrackerStore, response_selector_agent: Agent, sender_id: Text ): text = "Good morning" tokenizer = WhitespaceTokenizer(WhitespaceTokenizer.get_default_config()) diff --git a/tests/core/test_training.py b/tests/core/test_training.py index 6e4ab56b2dd4..8a78e3d62508 100644 --- a/tests/core/test_training.py +++ b/tests/core/test_training.py @@ -36,7 +36,7 @@ async def test_random_seed( tmp_path: Path, monkeypatch: MonkeyPatch, domain_path: Text, stories_path: Text ): policies_config = { - "policies": [{"name": "TEDPolicy", "random_seed": 42}, {"name": "RulePolicy"},] + "policies": [{"name": "TEDPolicy", "random_seed": 42}, {"name": "RulePolicy"}] } config_file = tmp_path / "config.yml" rasa.shared.utils.io.write_yaml(policies_config, config_file) diff --git a/tests/core/training/test_interactive.py b/tests/core/training/test_interactive.py index bb24dcf9fab1..ad25591b81b8 100644 --- a/tests/core/training/test_interactive.py +++ b/tests/core/training/test_interactive.py @@ -480,9 +480,7 @@ async def test_undo_latest_msg(mock_endpoint): assert corrected_event["event"] == "undo" -async def test_write_stories_to_file( - mock_endpoint: EndpointConfig, tmp_path, -): +async def test_write_stories_to_file(mock_endpoint: EndpointConfig, tmp_path): tracker_dump = rasa.shared.utils.io.read_file( "data/test_trackers/tracker_moodbot_with_new_utterances.json" ) @@ -620,7 +618,7 @@ async def test_filter_intents_before_save_nlu_file(domain_path: Text): @pytest.mark.parametrize( "path, expected_format", - [("bla.json", RASA), ("other.yml", RASA_YAML), ("unknown", UNK),], + [("bla.json", RASA), ("other.yml", RASA_YAML), ("unknown", UNK)], ) def test_get_nlu_target_format(path: Text, expected_format: Text): assert interactive._get_nlu_target_format(path) == expected_format @@ -762,9 +760,7 @@ async def test_correct_question_for_action_name_was_asked( tracker = DialogueStateTracker.from_events("some_sender", []) monkeypatch.setattr( - interactive, - "retrieve_tracker", - AsyncMock(return_value=tracker.current_state()), + interactive, "retrieve_tracker", AsyncMock(return_value=tracker.current_state()) ) monkeypatch.setattr( interactive, "_ask_questions", AsyncMock(return_value=is_marked_as_correct) @@ -772,7 +768,7 @@ async def test_correct_question_for_action_name_was_asked( monkeypatch.setattr( interactive, "_request_action_from_user", - AsyncMock(return_value=("action_another_one", False,)), + AsyncMock(return_value=("action_another_one", False)), ) mocked_send_action = AsyncMock() diff --git a/tests/core/training/test_story_conflict.py b/tests/core/training/test_story_conflict.py index f4b4fb7ca743..251efcc22fe0 100644 --- a/tests/core/training/test_story_conflict.py +++ b/tests/core/training/test_story_conflict.py @@ -181,16 +181,16 @@ async def test_has_prior_events(): async def test_get_previous_event(): assert _get_previous_event( {PREVIOUS_ACTION: {"action_name": "utter_greet"}, USER: 
{"intent": "greet"}} - ) == ("action", "utter_greet",) + ) == ("action", "utter_greet") assert _get_previous_event( {PREVIOUS_ACTION: {"action_text": "this is a test"}, USER: {"intent": "greet"}} - ) == ("bot utterance", "this is a test",) + ) == ("bot utterance", "this is a test") assert _get_previous_event( { PREVIOUS_ACTION: {"action_name": ACTION_LISTEN_NAME}, USER: {"intent": "greet"}, } - ) == ("intent", "greet",) + ) == ("intent", "greet") async def test_has_no_prior_events(): diff --git a/tests/dialogues.py b/tests/dialogues.py index 9bb8326397d2..412326cc3baf 100644 --- a/tests/dialogues.py +++ b/tests/dialogues.py @@ -11,7 +11,7 @@ TEST_DEFAULT_DIALOGUE = Dialogue( name="default", events=[ - ActionExecuted(action_name="action_listen", timestamp=1551952977.4850519,), + ActionExecuted(action_name="action_listen", timestamp=1551952977.4850519), UserUttered( entities=[{"end": 19, "entity": "name", "start": 14, "value": "Peter"}], intent={"confidence": 0.0, "name": "greet"}, @@ -29,7 +29,7 @@ timestamp=1551953035.076376, ), SlotSet(key="name", timestamp=1551953035.076385, value="Peter"), - ActionExecuted(action_name="utter_greet", timestamp=1551953040.607782,), + ActionExecuted(action_name="utter_greet", timestamp=1551953040.607782), BotUttered( data={"attachment": None, "buttons": None, "elements": None}, text="hey there Peter!", @@ -40,7 +40,7 @@ TEST_FORMBOT_DIALOGUE = Dialogue( name="formbot", events=[ - ActionExecuted(action_name="action_listen", timestamp=1551884035.892855,), + ActionExecuted(action_name="action_listen", timestamp=1551884035.892855), UserUttered( intent={"confidence": 0.3748943507671356, "name": "greet"}, parse_data={ @@ -128,7 +128,7 @@ timestamp=1551884208.092693, ), ActionExecuted( - action_name="action_deactivate_loop", timestamp=1551884214.951055, + action_name="action_deactivate_loop", timestamp=1551884214.951055 ), ActiveLoop(name=None, timestamp=1551884214.9510589), SlotSet(key="requested_slot", timestamp=1551884214.951062, value=None), @@ -143,7 +143,7 @@ TEST_MOODBOT_DIALOGUE = Dialogue( name="moodbot", events=[ - ActionExecuted(action_name="action_listen", timestamp=1551883958.346432,), + ActionExecuted(action_name="action_listen", timestamp=1551883958.346432), UserUttered( intent={"confidence": 0.44488201660555066, "name": "greet"}, parse_data={ diff --git a/tests/docs/test_docs_use_base_url.py b/tests/docs/test_docs_use_base_url.py index a221200bec91..7093a6ab79ae 100644 --- a/tests/docs/test_docs_use_base_url.py +++ b/tests/docs/test_docs_use_base_url.py @@ -10,7 +10,7 @@ # we're matching anchors with href containing strings, but not starting # with "http". 
This also excludes local href already configured using `useBaseUrl()` ANCHOR_RE = re.compile( - r"<(a|Button)[^>]*href=\"(?P<href>(?!http).+?)\"[^>]*>", re.DOTALL, + r"<(a|Button)[^>]*href=\"(?P<href>(?!http).+?)\"[^>]*>", re.DOTALL ) diff --git a/tests/engine/recipes/test_default_recipe.py b/tests/engine/recipes/test_default_recipe.py index 84c61e5f4b6b..3f977e6ac7c5 100644 --- a/tests/engine/recipes/test_default_recipe.py +++ b/tests/engine/recipes/test_default_recipe.py @@ -136,7 +136,7 @@ def test_generate_graphs( config = rasa.shared.utils.io.read_yaml_file(config_path) - recipe = Recipe.recipe_for_name(DefaultV1Recipe.name,) + recipe = Recipe.recipe_for_name(DefaultV1Recipe.name) model_config = recipe.graph_config_for_recipe( config, {}, training_type=training_type, is_finetuning=is_finetuning ) @@ -173,7 +173,7 @@ def test_language_returning(): ) recipe = Recipe.recipe_for_name(DefaultV1Recipe.name) - model_config = recipe.graph_config_for_recipe(config, {},) + model_config = recipe.graph_config_for_recipe(config, {}) assert model_config.language == "xy" @@ -193,7 +193,7 @@ def test_tracker_generator_parameter_interpolation(): recipe = Recipe.recipe_for_name(DefaultV1Recipe.name) model_config = recipe.graph_config_for_recipe( - config, {"augmentation_factor": augmentation, "debug_plots": debug_plots}, + config, {"augmentation_factor": augmentation, "debug_plots": debug_plots} ) node = model_config.train_schema.nodes["training_tracker_provider"] @@ -216,7 +216,7 @@ def test_nlu_training_data_persistence(): recipe = Recipe.recipe_for_name(DefaultV1Recipe.name) model_config = recipe.graph_config_for_recipe( - config, {"persist_nlu_training_data": True}, + config, {"persist_nlu_training_data": True} ) node = model_config.train_schema.nodes["nlu_training_data_provider"] @@ -239,7 +239,7 @@ def test_num_threads_interpolation(): for node_name, node in expected_train_schema.nodes.items(): if issubclass( node.uses, - (SklearnIntentClassifier, MitieEntityExtractor, MitieIntentClassifier,), + (SklearnIntentClassifier, MitieEntityExtractor, MitieIntentClassifier), ) and node_name.startswith("train_"): node.config["num_threads"] = 20 @@ -387,19 +387,19 @@ def test_retrieve_via_invalid_module_path(): with pytest.raises(ImportError): path = "rasa.core.policies.ted_policy.TEDPolicy1000" DefaultV1Recipe().graph_config_for_recipe( - {"policies": [{"name": path}]}, {}, TrainingType.CORE, + {"policies": [{"name": path}]}, {}, TrainingType.CORE ) def test_train_nlu_without_nlu_pipeline(): with pytest.raises(InvalidConfigException): DefaultV1Recipe().graph_config_for_recipe( - {"pipeline": []}, {}, TrainingType.NLU, + {"pipeline": []}, {}, TrainingType.NLU ) def test_train_core_without_nlu_pipeline(): with pytest.raises(InvalidConfigException): DefaultV1Recipe().graph_config_for_recipe( - {"policies": []}, {}, TrainingType.CORE, + {"policies": []}, {}, TrainingType.CORE ) diff --git a/tests/engine/runner/test_dask.py b/tests/engine/runner/test_dask.py index fa749ae0223d..ec85037458a9 100644 --- a/tests/engine/runner/test_dask.py +++ b/tests/engine/runner/test_dask.py @@ -3,11 +3,7 @@ import pytest -from rasa.engine.graph import ( - ExecutionContext, - GraphSchema, - SchemaNode, -) +from rasa.engine.graph import ExecutionContext, GraphSchema, SchemaNode from rasa.engine.exceptions import GraphRunError from rasa.engine.runner.dask import DaskGraphRunner from rasa.engine.storage.storage import ModelStorage @@ -104,7 +100,7 @@ def test_default_config( constructor_name="create", config={"x": x} if x else {},
is_target=True, - ), + ) } ) @@ -138,7 +134,7 @@ def test_no_inputs(default_model_storage: ModelStorage): constructor_name="create", config={}, is_target=True, - ), + ) } ) runner = DaskGraphRunner( @@ -159,7 +155,7 @@ def test_no_target(default_model_storage: ModelStorage): fn="provide", constructor_name="create", config={}, - ), + ) } ) runner = DaskGraphRunner( @@ -213,7 +209,7 @@ def test_non_eager_can_use_inputs_for_constructor(default_model_storage: ModelSt config={}, eager=False, is_target=True, - ), + ) } ) runner = DaskGraphRunner( @@ -235,7 +231,7 @@ def test_can_use_alternate_constructor(default_model_storage: ModelStorage): constructor_name="create_with_2", config={}, is_target=True, - ), + ) } ) runner = DaskGraphRunner( @@ -257,7 +253,7 @@ def test_execution_context(default_model_storage: ModelStorage): constructor_name="create", config={}, is_target=True, - ), + ) } ) context = ExecutionContext(graph_schema=graph_schema, model_id="some_id") @@ -282,7 +278,7 @@ def test_input_value_is_node_name(default_model_storage: ModelStorage): constructor_name="create", config={}, is_target=True, - ), + ) } ) runner = DaskGraphRunner( diff --git a/tests/engine/storage/test_local_model_storage.py b/tests/engine/storage/test_local_model_storage.py index 46dafc265eff..03a7a739d2ef 100644 --- a/tests/engine/storage/test_local_model_storage.py +++ b/tests/engine/storage/test_local_model_storage.py @@ -83,9 +83,7 @@ def test_read_from_rasa2_resource(tmp_path_factory: TempPathFactory): storage.metadata_from_archive(model_archive_path=model_zips / resource_name) -def test_create_model_package( - tmp_path_factory: TempPathFactory, domain: Domain, -): +def test_create_model_package(tmp_path_factory: TempPathFactory, domain: Domain): train_model_storage = LocalModelStorage( tmp_path_factory.mktemp("train model storage") ) @@ -118,7 +116,7 @@ def test_create_model_package( fn="run", constructor_name="load", config={"some_config": 123455, "some more config": [{"nested": "hi"}]}, - ), + ) } ) @@ -146,7 +144,7 @@ def test_create_model_package( just_packaged_metadata = LocalModelStorage.metadata_from_archive(archive_path) - (load_model_storage, packaged_metadata,) = LocalModelStorage.from_model_archive( + (load_model_storage, packaged_metadata) = LocalModelStorage.from_model_archive( load_model_storage_dir, archive_path ) @@ -166,7 +164,7 @@ def test_create_model_package( def test_read_unsupported_model( - monkeypatch: MonkeyPatch, tmp_path_factory: TempPathFactory, domain: Domain, + monkeypatch: MonkeyPatch, tmp_path_factory: TempPathFactory, domain: Domain ): train_model_storage = LocalModelStorage( tmp_path_factory.mktemp("train model storage") diff --git a/tests/engine/storage/test_storage.py b/tests/engine/storage/test_storage.py index e49103343a97..3c169772fb54 100644 --- a/tests/engine/storage/test_storage.py +++ b/tests/engine/storage/test_storage.py @@ -40,7 +40,7 @@ def test_metadata_serialization(domain: Domain, tmp_path: Path): fn="run", constructor_name="load", config={"some_config": 123455, "some more config": [{"nested": "hi"}]}, - ), + ) } ) diff --git a/tests/engine/test_caching.py b/tests/engine/test_caching.py index 5c0ad884b6ee..96bf60382b27 100644 --- a/tests/engine/test_caching.py +++ b/tests/engine/test_caching.py @@ -236,7 +236,7 @@ def test_restore_cached_output_with_invalid_module( ) monkeypatch.setattr( - rasa.shared.utils.common, "class_from_module_path", cached_module, + rasa.shared.utils.common, "class_from_module_path", cached_module ) assert ( diff --git 
a/tests/engine/test_graph.py b/tests/engine/test_graph.py index 994b11466ad0..e634c300b9df 100644 --- a/tests/engine/test_graph.py +++ b/tests/engine/test_graph.py @@ -31,7 +31,7 @@ def test_serialize_graph_schema(tmp_path: Path): is_target=True, resource=Resource("test resource"), ), - }, + } ) serialized = graph_schema.as_dict() @@ -55,7 +55,7 @@ def test_invalid_module_error_when_deserializing_schemas(tmp_path: Path): fn="train", constructor_name="create", config={"some_config": 123455, "some more config": [{"nested": "hi"}]}, - ), + ) } ) diff --git a/tests/engine/test_graph_node.py b/tests/engine/test_graph_node.py index 38559b9afdb0..726f1027222a 100644 --- a/tests/engine/test_graph_node.py +++ b/tests/engine/test_graph_node.py @@ -8,12 +8,7 @@ import rasa.shared.utils import rasa.shared.utils.io from rasa.engine.exceptions import GraphComponentException -from rasa.engine.graph import ( - ExecutionContext, - GraphComponent, - GraphNode, - GraphSchema, -) +from rasa.engine.graph import ExecutionContext, GraphComponent, GraphNode, GraphSchema from rasa.engine.storage.storage import ModelStorage from rasa.engine.storage.resource import Resource from tests.engine.graph_components_test_classes import ( diff --git a/tests/engine/test_loader.py b/tests/engine/test_loader.py index 5309251cbb51..baf634f6ebd0 100644 --- a/tests/engine/test_loader.py +++ b/tests/engine/test_loader.py @@ -41,7 +41,7 @@ def test_loader_loads_graph_runner( uses=PersistableTestComponent, fn="train", constructor_name="create", - config={"test_value": test_value,}, + config={"test_value": test_value}, is_target=True, ), "load": SchemaNode( @@ -63,14 +63,14 @@ def test_loader_loads_graph_runner( config={}, is_target=True, resource=Resource("train"), - ), + ) } ) output_filename = tmp_path / "model.tar.gz" importer = TrainingDataImporter.load_from_dict( - training_data_paths=[], domain_path=str(domain_path), + training_data_paths=[], domain_path=str(domain_path) ) trained_at = datetime.utcnow() diff --git a/tests/engine/test_validation.py b/tests/engine/test_validation.py index c171e6fbe718..5ddeb062916b 100644 --- a/tests/engine/test_validation.py +++ b/tests/engine/test_validation.py @@ -53,7 +53,7 @@ def run(self) -> List[Message]: class TestCoreTarget(TestComponentWithoutRun): - def run(self,) -> PolicyPrediction: + def run(self) -> PolicyPrediction: pass @@ -105,7 +105,7 @@ def create_test_schema( ), **DEFAULT_PREDICT_SCHEMA.nodes, **parent_node, - }, + } ) if is_train_graph: @@ -140,7 +140,7 @@ def test_graph_component_fn_does_not_exist(): with pytest.raises( GraphSchemaValidationException, match="required method 'some_fn'" ): - validation.validate(graph_config,) + validation.validate(graph_config) def test_graph_output_is_not_fingerprintable_int(): @@ -151,7 +151,7 @@ def run(self) -> int: graph_config = create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="fingerprintable"): - validation.validate(graph_config,) + validation.validate(graph_config) def test_predict_graph_output_is_not_fingerprintable(): @@ -172,7 +172,7 @@ def run(self) -> Any: graph_config = create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="fingerprintable"): - validation.validate(graph_config,) + validation.validate(graph_config) def test_graph_output_is_not_fingerprintable_None(): @@ -180,10 +180,10 @@ class MyComponent(TestComponentWithoutRun): def run(self) -> None: pass - graph_config = create_test_schema(uses=MyComponent,) + graph_config = 
create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="fingerprintable"): - validation.validate(graph_config,) + validation.validate(graph_config) def test_graph_with_forward_referenced_output_type(): @@ -196,7 +196,7 @@ def run(self) -> "UserUttered": # noqa: F821 graph_config = create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="forward reference"): - validation.validate(graph_config,) + validation.validate(graph_config) def test_graph_output_missing_type_annotation(): @@ -209,7 +209,7 @@ def run(self): with pytest.raises( GraphSchemaValidationException, match="does not have a type annotation" ): - validation.validate(graph_config,) + validation.validate(graph_config) def test_graph_with_fingerprintable_output(): @@ -233,7 +233,7 @@ def run(self) -> MyTrainingData: graph_config = create_test_schema(uses=MyComponent) - validation.validate(graph_config,) + validation.validate(graph_config) def test_graph_constructor_missing(): @@ -246,7 +246,7 @@ def run(self) -> TrainingData: with pytest.raises( GraphSchemaValidationException, match="required method 'invalid'" ): - validation.validate(graph_config,) + validation.validate(graph_config) def test_graph_constructor_config_wrong_type(): @@ -264,7 +264,7 @@ def create( graph_config = create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="incompatible type"): - validation.validate(graph_config,) + validation.validate(graph_config) def test_graph_constructor_resource_wrong_type(): @@ -282,7 +282,7 @@ def create( graph_config = create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="incompatible type"): - validation.validate(graph_config,) + validation.validate(graph_config) def test_graph_constructor_model_storage_wrong_type(): @@ -300,7 +300,7 @@ def create( graph_config = create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="incompatible type"): - validation.validate(graph_config,) + validation.validate(graph_config) def test_graph_constructor_execution_context_wrong_type(): @@ -318,7 +318,7 @@ def create( graph_config = create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="incompatible type"): - validation.validate(graph_config,) + validation.validate(graph_config) @pytest.mark.parametrize( @@ -359,7 +359,7 @@ def supported_languages() -> Optional[List[Text]]: @pytest.mark.parametrize( - "current_language, not_supported_languages", [("de", ["de", "en"]), ("en", ["en"])], + "current_language, not_supported_languages", [("de", ["de", "en"]), ("en", ["en"])] ) def test_graph_constructor_execution_exclusive_list_not_supported_language( current_language: Text, not_supported_languages: Optional[List[Text]] @@ -411,7 +411,7 @@ def required_packages() -> List[Text]: graph_config = create_test_schema(uses=MyComponent) with pytest.raises(GraphSchemaValidationException, match="not installed"): - validation.validate(graph_config,) + validation.validate(graph_config) @pytest.mark.parametrize("required_packages", [["tensorflow"], ["tensorflow", "numpy"]]) @@ -424,7 +424,7 @@ def required_packages() -> List[Text]: graph_config = create_test_schema(uses=MyComponent) - validation.validate(graph_config,) + validation.validate(graph_config) def test_run_param_not_satisfied(): @@ -435,7 +435,7 @@ def run(self, some_param: TrainingData) -> TrainingData: graph_config = create_test_schema(uses=MyComponent) with 
pytest.raises(GraphSchemaValidationException, match="needs the param"): - validation.validate(graph_config,) + validation.validate(graph_config) def test_run_param_satifisfied_due_to_default(): @@ -445,7 +445,7 @@ def run(self, some_param: TrainingData = TrainingData()) -> TrainingData: graph_config = create_test_schema(uses=MyComponent) - validation.validate(graph_config,) + validation.validate(graph_config) def test_too_many_supplied_params(): @@ -468,7 +468,7 @@ def run(self, **kwargs: Any) -> TrainingData: uses=MyComponent, needs={"some_param": "parent"}, parent=TestComponentWithRun ) - validation.validate(graph_config,) + validation.validate(graph_config) def test_run_fn_with_variable_length_positional_param(): @@ -480,7 +480,7 @@ def run(self, *args: Any, some_param: TrainingData) -> TrainingData: uses=MyComponent, needs={"some_param": "parent"}, parent=TestComponentWithRun ) - validation.validate(graph_config,) + validation.validate(graph_config) def test_matching_params_due_to_constructor(): @@ -550,7 +550,7 @@ def load( pass graph_config = create_test_schema( - uses=MyComponent, eager=eager, constructor_name="load", + uses=MyComponent, eager=eager, constructor_name="load" ) with pytest.raises(GraphSchemaValidationException, match=error_message): @@ -583,7 +583,7 @@ def run(self) -> Domain: with pytest.raises( GraphSchemaValidationException, match="expects an input of type" ): - validation.validate(graph_config,) + validation.validate(graph_config) def test_parent_supplying_wrong_type_to_constructor(): @@ -614,7 +614,7 @@ def load( with pytest.raises( GraphSchemaValidationException, match="expects an input of type" ): - validation.validate(graph_config,) + validation.validate(graph_config) def test_parent_supplying_subtype(): @@ -627,10 +627,10 @@ def run(self, training_data: TrainingData) -> TrainingData: pass graph_config = create_test_schema( - uses=MyComponent, parent=Parent, needs={"training_data": "parent"}, + uses=MyComponent, parent=Parent, needs={"training_data": "parent"} ) - validation.validate(graph_config,) + validation.validate(graph_config) def test_child_accepting_any_type_from_parent(): @@ -643,10 +643,10 @@ def run(self, training_data: Any) -> TrainingData: pass graph_config = create_test_schema( - uses=MyComponent, parent=Parent, needs={"training_data": "parent"}, + uses=MyComponent, parent=Parent, needs={"training_data": "parent"} ) - validation.validate(graph_config,) + validation.validate(graph_config) @pytest.mark.parametrize("is_train_graph", [True, False]) @@ -756,7 +756,7 @@ def test_validation_with_missing_nlu_target(): constructor_name="create", fn="run", config={}, - ), + ) } ) @@ -798,7 +798,7 @@ def run(self, nlu_target_output: List[Message]) -> List[Message]: fn="run", config={}, ), - }, + } ) with pytest.raises( @@ -826,8 +826,8 @@ def test_validation_with_nlu_target_wrong_type(): constructor_name="create", fn="run", config={}, - ), - }, + ) + } ) with pytest.raises(GraphSchemaValidationException, match="invalid return type"): @@ -853,8 +853,8 @@ def test_validation_with_missing_core_target(): constructor_name="create", fn="run", config={}, - ), - }, + ) + } ) with pytest.raises(GraphSchemaValidationException, match="invalid Core target"): @@ -880,12 +880,12 @@ def test_validation_with_core_target_wrong_type(): constructor_name="create", fn="run", config={}, - ), - }, + ) + } ) with pytest.raises( - GraphSchemaValidationException, match="Core model's .* invalid return type", + GraphSchemaValidationException, match="Core model's .* invalid return 
type" ): validation.validate( GraphModelConfiguration( @@ -930,7 +930,7 @@ def run(self, core_target_output: PolicyPrediction) -> PolicyPrediction: fn="run", config={}, ), - }, + } ) with pytest.raises( @@ -1142,7 +1142,7 @@ def test_validate_validates_required_components( itertools.product(REQUIRED_COMPONENT_TEST_CASES, [True, False]), ) def test_validate_required_components( - test_case: List[RequiredComponentsTestCase], test_subclass: bool, + test_case: List[RequiredComponentsTestCase], test_subclass: bool ): graph_schema = _create_graph_schema_from_requirements( node_needs_requires=test_case.node_needs_requires_tuples, @@ -1151,7 +1151,7 @@ def test_validate_required_components( ) num_unmet = test_case.num_unmet_requirements if num_unmet == 0: - validation._validate_required_components(schema=graph_schema,) + validation._validate_required_components(schema=graph_schema) else: message = f"{num_unmet} components are missing" with pytest.raises(GraphSchemaValidationException, match=message): @@ -1170,7 +1170,7 @@ def test_validate_required_components( ), ) def test_recursively_validate_required_components( - test_case: List[RequiredComponentsTestCase], test_subclass: bool, + test_case: List[RequiredComponentsTestCase], test_subclass: bool ): graph_schema = _create_graph_schema_from_requirements( node_needs_requires=test_case.node_needs_requires_tuples, diff --git a/tests/engine/training/test_fingerprinting.py b/tests/engine/training/test_fingerprinting.py index d535e635e6b6..ee1c94205211 100644 --- a/tests/engine/training/test_fingerprinting.py +++ b/tests/engine/training/test_fingerprinting.py @@ -11,10 +11,10 @@ def test_fingerprint_stays_same(): key1 = fingerprinting.calculate_fingerprint_key( - TEDPolicy, TEDPolicy.get_default_config(), {"input": FingerprintableText("Hi")}, + TEDPolicy, TEDPolicy.get_default_config(), {"input": FingerprintableText("Hi")} ) key2 = fingerprinting.calculate_fingerprint_key( - TEDPolicy, TEDPolicy.get_default_config(), {"input": FingerprintableText("Hi")}, + TEDPolicy, TEDPolicy.get_default_config(), {"input": FingerprintableText("Hi")} ) assert key1 == key2 @@ -37,7 +37,7 @@ def test_fingerprint_changes_due_to_class(): def test_fingerprint_changes_due_to_config(): key1 = fingerprinting.calculate_fingerprint_key( - TEDPolicy, {}, {"input": FingerprintableText("Hi")}, + TEDPolicy, {}, {"input": FingerprintableText("Hi")} ) key2 = fingerprinting.calculate_fingerprint_key( ResponseSelector, @@ -50,7 +50,7 @@ def test_fingerprint_changes_due_to_config(): def test_fingerprint_changes_due_to_inputs(): key1 = fingerprinting.calculate_fingerprint_key( - TEDPolicy, {}, {"input": FingerprintableText("Hi")}, + TEDPolicy, {}, {"input": FingerprintableText("Hi")} ) key2 = fingerprinting.calculate_fingerprint_key( ResponseSelector, @@ -63,14 +63,14 @@ def test_fingerprint_changes_due_to_inputs(): def test_fingerprint_changes_due_to_changed_source(monkeypatch: MonkeyPatch): key1 = fingerprinting.calculate_fingerprint_key( - TEDPolicy, {}, {"input": FingerprintableText("Hi")}, + TEDPolicy, {}, {"input": FingerprintableText("Hi")} ) get_source_mock = Mock(return_value="other implementation") monkeypatch.setattr(inspect, inspect.getsource.__name__, get_source_mock) key2 = fingerprinting.calculate_fingerprint_key( - TEDPolicy, {}, {"input": FingerprintableText("Hi")}, + TEDPolicy, {}, {"input": FingerprintableText("Hi")} ) assert key1 != key2 diff --git a/tests/engine/training/test_graph_trainer.py b/tests/engine/training/test_graph_trainer.py index 
9b5b7b6da972..b2a20d07a08b 100644 --- a/tests/engine/training/test_graph_trainer.py +++ b/tests/engine/training/test_graph_trainer.py @@ -59,7 +59,7 @@ def test_graph_trainer_returns_model_metadata( uses=PersistableTestComponent, fn="train", constructor_name="create", - config={"test_value": test_value,}, + config={"test_value": test_value}, is_target=True, ), "load": SchemaNode( @@ -81,7 +81,7 @@ def test_graph_trainer_returns_model_metadata( config={}, is_target=True, resource=Resource("train"), - ), + ) } ) @@ -246,21 +246,13 @@ def test_graph_trainer_always_reads_input( # The first train should call all the components and cache their outputs. mocks = spy_on_all_components(train_schema) train_with_schema(train_schema, temp_cache) - assert node_call_counts(mocks) == { - "read_file": 1, - "subtract": 1, - "assert_node": 1, - } + assert node_call_counts(mocks) == {"read_file": 1, "subtract": 1, "assert_node": 1} # Nothing has changed this time, so no components will run # (just input nodes during fingerprint run). mocks = spy_on_all_components(train_schema) train_with_schema(train_schema, temp_cache) - assert node_call_counts(mocks) == { - "read_file": 1, - "subtract": 0, - "assert_node": 0, - } + assert node_call_counts(mocks) == {"read_file": 1, "subtract": 0, "assert_node": 0} # When we update the input file, all the nodes will run again and the assert_node # will fail. @@ -302,19 +294,13 @@ def test_graph_trainer_with_non_cacheable_components( # The first train should call all the components. mocks = spy_on_all_components(train_schema) train_with_schema(train_schema, temp_cache) - assert node_call_counts(mocks) == { - "input": 1, - "subtract": 1, - } + assert node_call_counts(mocks) == {"input": 1, "subtract": 1} # Nothing has changed but none of the components can cache so all will have to # run again.
mocks = spy_on_all_components(train_schema) train_with_schema(train_schema, temp_cache) - assert node_call_counts(mocks) == { - "input": 1, - "subtract": 1, - } + assert node_call_counts(mocks) == {"input": 1, "subtract": 1} def node_call_counts(mocks: Dict[Text, Mock]) -> Dict[Text, int]: @@ -345,9 +331,7 @@ def inner( cache = local_cache_creator(path) graph_trainer = GraphTrainer( - model_storage=model_storage, - cache=cache, - graph_runner_class=DaskGraphRunner, + model_storage=model_storage, cache=cache, graph_runner_class=DaskGraphRunner ) output_filename = path / "model.tar.gz" @@ -495,7 +479,7 @@ def test_graph_trainer_train_logging_with_cached_components( def test_resources_fingerprints_are_unique_when_cached( - temp_cache: LocalTrainingCache, train_with_schema: Callable, + temp_cache: LocalTrainingCache, train_with_schema: Callable ): train_schema = GraphSchema( { @@ -542,7 +526,7 @@ def test_resources_fingerprints_are_unique_when_cached( def test_resources_fingerprints_remain_after_being_cached( - temp_cache: LocalTrainingCache, train_with_schema: Callable, + temp_cache: LocalTrainingCache, train_with_schema: Callable ): train_schema = GraphSchema( { @@ -613,7 +597,7 @@ def test_exception_handling_for_on_before_hook( default_execution_context: ExecutionContext, ): schema_node = SchemaNode( - needs={}, uses=ProvideX, fn="provide", constructor_name="create", config={}, + needs={}, uses=ProvideX, fn="provide", constructor_name="create", config={} ) class MyHook(GraphNodeHook): diff --git a/tests/engine/training/test_hooks.py b/tests/engine/training/test_hooks.py index d248048463a1..13d9fda0949f 100644 --- a/tests/engine/training/test_hooks.py +++ b/tests/engine/training/test_hooks.py @@ -4,14 +4,11 @@ from rasa.engine.training import fingerprinting from rasa.engine.training.components import PrecomputedValueProvider from rasa.engine.training.hooks import TrainingHook -from tests.engine.graph_components_test_classes import ( - CacheableComponent, - CacheableText, -) +from tests.engine.graph_components_test_classes import CacheableComponent, CacheableText def test_training_hook_saves_to_cache( - default_model_storage: ModelStorage, temp_cache: TrainingCache, + default_model_storage: ModelStorage, temp_cache: TrainingCache ): # We need an execution context so the hook can determine the class of the graph # component @@ -71,7 +68,7 @@ def test_training_hook_saves_to_cache( def test_training_hook_does_not_cache_cached_component( - default_model_storage: ModelStorage, temp_cache: TrainingCache, + default_model_storage: ModelStorage, temp_cache: TrainingCache ): # We need an execution context so the hook can determine the class of the graph # component diff --git a/tests/graph_components/converters/test_nlu_message_converter.py b/tests/graph_components/converters/test_nlu_message_converter.py index f3a0e891dfa3..ec1d2d3d9649 100644 --- a/tests/graph_components/converters/test_nlu_message_converter.py +++ b/tests/graph_components/converters/test_nlu_message_converter.py @@ -8,7 +8,7 @@ def test_nlu_message_converter_converts_message( - default_model_storage: ModelStorage, default_execution_context: ExecutionContext, + default_model_storage: ModelStorage, default_execution_context: ExecutionContext ): component = NLUMessageConverter.create( {**NLUMessageConverter.get_default_config()}, @@ -28,10 +28,10 @@ def test_nlu_message_converter_converts_message( def test_nlu_message_converter_converts_message_with_metadata( - default_model_storage: ModelStorage, default_execution_context: 
ExecutionContext, + default_model_storage: ModelStorage, default_execution_context: ExecutionContext ): component = NLUMessageConverter.create( - {}, default_model_storage, Resource("with_metadata"), default_execution_context, + {}, default_model_storage, Resource("with_metadata"), default_execution_context ) message = UserMessage(text="Hello", metadata={"test_key": "test_value"}) @@ -44,7 +44,7 @@ def test_nlu_message_converter_converts_message_with_metadata( def test_nlu_message_converter_handles_no_user_message( - default_model_storage: ModelStorage, default_execution_context: ExecutionContext, + default_model_storage: ModelStorage, default_execution_context: ExecutionContext ): component = NLUMessageConverter.create( {}, diff --git a/tests/graph_components/providers/test_story_graph_provider.py b/tests/graph_components/providers/test_story_graph_provider.py index 7701535619eb..6824bb0aa81a 100644 --- a/tests/graph_components/providers/test_story_graph_provider.py +++ b/tests/graph_components/providers/test_story_graph_provider.py @@ -10,7 +10,7 @@ @pytest.mark.parametrize( - "config", [{}, {"exclusion_percentage": None}, {"exclusion_percentage": 25},], + "config", [{}, {"exclusion_percentage": None}, {"exclusion_percentage": 25}] ) def test_story_graph_provider_provide( default_model_storage: ModelStorage, diff --git a/tests/graph_components/validators/test_default_recipe_validator.py b/tests/graph_components/validators/test_default_recipe_validator.py index 74738089eb7e..a6c15b1e9b6b 100644 --- a/tests/graph_components/validators/test_default_recipe_validator.py +++ b/tests/graph_components/validators/test_default_recipe_validator.py @@ -115,10 +115,10 @@ def _test_validation_warnings_with_default_configs( @pytest.mark.parametrize( - "component_type, warns", [(ResponseSelector, False,), (None, True)] + "component_type, warns", [(ResponseSelector, False), (None, True)] ) def test_nlu_warn_if_training_examples_with_intent_response_key_are_unused( - component_type: Type[GraphComponent], warns: bool, + component_type: Type[GraphComponent], warns: bool ): messages = [ Message( @@ -154,7 +154,7 @@ def test_nlu_warn_if_training_examples_with_intent_response_key_are_unused( if component_type: component_types.append(component_type) _test_validation_warnings_with_default_configs( - training_data=training_data, component_types=component_types, warnings=warnings, + training_data=training_data, component_types=component_types, warnings=warnings ) @@ -163,7 +163,7 @@ def test_nlu_warn_if_training_examples_with_intent_response_key_are_unused( [(extractor, False) for extractor in TRAINABLE_EXTRACTORS] + [(None, True)], ) def test_nlu_warn_if_training_examples_with_entities_are_unused( - component_type: Type[GraphComponent], warns: bool, + component_type: Type[GraphComponent], warns: bool ): messages = [ Message( @@ -192,7 +192,7 @@ def test_nlu_warn_if_training_examples_with_entities_are_unused( if component_type: component_types.append(component_type) _test_validation_warnings_with_default_configs( - training_data=training_data, component_types=component_types, warnings=warnings, + training_data=training_data, component_types=component_types, warnings=warnings ) @@ -209,7 +209,7 @@ def test_nlu_warn_if_training_examples_with_entities_are_unused( ], ) def test_nlu_warn_if_training_examples_with_entity_roles_are_unused( - component_type: Type[GraphComponent], role_instead_of_group: bool, warns: bool, + component_type: Type[GraphComponent], role_instead_of_group: bool, warns: bool ): messages = [ 
Message( @@ -243,13 +243,13 @@ def test_nlu_warn_if_training_examples_with_entity_roles_are_unused( if component_type: component_types.append(component_type) _test_validation_warnings_with_default_configs( - training_data=training_data, component_types=component_types, warnings=warnings, + training_data=training_data, component_types=component_types, warnings=warnings ) @pytest.mark.parametrize( "component_type, warns", - [(RegexFeaturizer, False), (RegexEntityExtractor, False), (None, True),], + [(RegexFeaturizer, False), (RegexEntityExtractor, False), (None, True)], ) def test_nlu_warn_if_regex_features_are_not_used( component_type: Type[GraphComponent], warns: bool @@ -280,7 +280,7 @@ def test_nlu_warn_if_regex_features_are_not_used( + [ (featurizer, consumer, False, False) for consumer in [DIETClassifier, CRFEntityExtractor] - for featurizer in [RegexFeaturizer, RegexEntityExtractor,] + for featurizer in [RegexFeaturizer, RegexEntityExtractor] ], ) def test_nlu_warn_if_lookup_table_is_not_used( @@ -326,7 +326,7 @@ def test_nlu_warn_if_lookup_table_is_not_used( [ SchemaNode({}, WhitespaceTokenizer, "", "", {}), SchemaNode({}, RegexFeaturizer, "", "", {}), - SchemaNode({}, CRFEntityExtractor, "", "", {"features": [["pos"]]},), + SchemaNode({}, CRFEntityExtractor, "", "", {"features": [["pos"]]}), ], True, ), @@ -348,7 +348,7 @@ def test_nlu_warn_if_lookup_table_is_not_used( [ SchemaNode({}, WhitespaceTokenizer, "", "", {}), SchemaNode({}, RegexFeaturizer, "", "", {}), - SchemaNode({}, CRFEntityExtractor, "", "", {"features": [["pos"]]},), + SchemaNode({}, CRFEntityExtractor, "", "", {"features": [["pos"]]}), SchemaNode( {}, CRFEntityExtractor, @@ -393,8 +393,8 @@ def test_nlu_warn_if_lookup_table_and_crf_extractor_pattern_feature_mismatch( @pytest.mark.parametrize( "components, warns", [ - ([WhitespaceTokenizer, CRFEntityExtractor,], True,), - ([WhitespaceTokenizer, CRFEntityExtractor, EntitySynonymMapper,], False,), + ([WhitespaceTokenizer, CRFEntityExtractor], True), + ([WhitespaceTokenizer, CRFEntityExtractor, EntitySynonymMapper], False), ], ) def test_nlu_warn_if_entity_synonyms_unused( @@ -494,7 +494,7 @@ def test_nlu_do_not_raise_if_trainable_tokenizer(): ], True, ), - ([WhitespaceTokenizer, LexicalSyntacticFeaturizer, DIETClassifier,], False,), + ([WhitespaceTokenizer, LexicalSyntacticFeaturizer, DIETClassifier], False), ], ) def test_nlu_warn_of_competing_extractors( @@ -531,12 +531,12 @@ def test_nlu_warn_of_competing_extractors( True, ), ( - [WhitespaceTokenizer, LexicalSyntacticFeaturizer, RegexEntityExtractor,], + [WhitespaceTokenizer, LexicalSyntacticFeaturizer, RegexEntityExtractor], "data/test/overlapping_regex_entities.yml", False, ), ( - [WhitespaceTokenizer, LexicalSyntacticFeaturizer, DIETClassifier,], + [WhitespaceTokenizer, LexicalSyntacticFeaturizer, DIETClassifier], "data/test/overlapping_regex_entities.yml", False, ), @@ -558,7 +558,7 @@ def test_nlu_warn_of_competition_with_regex_extractor( data_path: Text, should_warn: bool, ): - importer = TrainingDataImporter.load_from_dict(training_data_paths=[data_path],) + importer = TrainingDataImporter.load_from_dict(training_data_paths=[data_path]) # there are no domain files for the above examples, so: monkeypatch.setattr(Domain, "check_missing_responses", lambda *args, **kwargs: None) @@ -689,9 +689,7 @@ def test_nlu_raise_if_featurizers_are_not_compatible( validator.validate(importer) -@pytest.mark.parametrize( - "policy_type", [TEDPolicy, RulePolicy, MemoizationPolicy,], -) +@pytest.mark.parametrize("policy_type", 
    [TEDPolicy, RulePolicy, MemoizationPolicy])
 def test_core_warn_if_data_but_no_policy(
     monkeypatch: MonkeyPatch, policy_type: Optional[Type[Policy]]
 ):
@@ -753,7 +751,7 @@ def test_core_warn_if_data_but_no_policy(
     ],
 )
 def test_core_warn_if_no_rule_policy(
-    monkeypatch: MonkeyPatch, policy_types: List[Type[Policy]], should_warn: bool,
+    monkeypatch: MonkeyPatch, policy_types: List[Type[Policy]], should_warn: bool
 ):
     graph_schema = GraphSchema(
         {
@@ -795,7 +793,7 @@ def test_core_warn_if_no_rule_policy(
     ],
 )
 def test_core_raise_if_domain_contains_form_names_but_no_rule_policy_given(
-    monkeypatch: MonkeyPatch, policy_types: List[Type[Policy]], should_raise: bool,
+    monkeypatch: MonkeyPatch, policy_types: List[Type[Policy]], should_raise: bool
 ):
     domain_with_form = Domain.from_dict(
         {KEY_FORMS: {"some-form": {"required_slots": []}}}
     )
@@ -919,7 +917,7 @@ def test_core_warn_if_policy_priorities_are_not_unique(


 @pytest.mark.parametrize("policy_type_consuming_rule_data", [RulePolicy])
-def test_core_warn_if_rule_data_missing(policy_type_consuming_rule_data: Type[Policy],):
+def test_core_warn_if_rule_data_missing(policy_type_consuming_rule_data: Type[Policy]):

     importer = TrainingDataImporter.load_from_dict(
         domain_path="data/test_e2ebot/domain.yml",
@@ -945,7 +943,7 @@ def test_core_warn_if_rule_data_missing(policy_type_consuming_rule_data: Type[Po


 @pytest.mark.parametrize(
-    "policy_type_not_consuming_rule_data", [TEDPolicy, MemoizationPolicy,],
+    "policy_type_not_consuming_rule_data", [TEDPolicy, MemoizationPolicy]
 )
 def test_core_warn_if_rule_data_unused(
     policy_type_not_consuming_rule_data: Type[Policy],
diff --git a/tests/graph_components/validators/test_finetuning_validator.py b/tests/graph_components/validators/test_finetuning_validator.py
index fef3da5bcaee..627fa6ff9285 100644
--- a/tests/graph_components/validators/test_finetuning_validator.py
+++ b/tests/graph_components/validators/test_finetuning_validator.py
@@ -70,7 +70,7 @@ def inner(

 @pytest.fixture
 def get_validation_method(
-    get_finetuning_validator: Callable[[bool, bool], FinetuningValidator],
+    get_finetuning_validator: Callable[[bool, bool], FinetuningValidator]
 ) -> Callable[[bool, bool, bool, bool, GraphSchema], ValidationMethodType]:
     def inner(
         finetuning: bool,
@@ -209,7 +209,7 @@ def _get_example_schema(num_epochs: int = 5, other_parameter: int = 10) -> Graph

 @pytest.mark.parametrize("nlu, core", [(True, False), (False, True), (True, True)])
 def test_validate_after_changing_epochs_in_config(
-    get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool,
+    get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool
 ):
     # training
     schema1 = _get_example_schema(num_epochs=5)
@@ -232,7 +232,7 @@ def test_validate_after_changing_epochs_in_config(

 @pytest.mark.parametrize("nlu, core", [(True, False), (False, True), (True, True)])
 def test_validate_after_changing_constructor(
-    get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool,
+    get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool
 ):
     # training
     schema1 = _get_example_schema(num_epochs=5)
@@ -253,7 +253,7 @@ def test_validate_after_changing_constructor(

 @pytest.mark.parametrize("nlu, core", [(True, False), (False, True), (True, True)])
 def test_validate_after_removing_node_from_schema(
-    get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool,
+    get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool
 ):
     # training
     schema1 = _get_example_schema(num_epochs=5)
@@ -268,7 +268,7 @@ def test_validate_after_removing_node_from_schema(

     # finetuning raises - doesn't matter if it's nlu/core/both
     loaded_validate = get_validation_method(
-        finetuning=True, load=True, nlu=nlu, core=core, graph_schema=schema2,
+        finetuning=True, load=True, nlu=nlu, core=core, graph_schema=schema2
     )
     with pytest.raises(InvalidConfigException):
         loaded_validate(importer=EmptyDataImporter())
@@ -276,7 +276,7 @@ def test_validate_after_removing_node_from_schema(

 @pytest.mark.parametrize("nlu, core", [(True, False), (False, True), (True, True)])
 def test_validate_after_adding_node_to_schema(
-    get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool,
+    get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool
 ):
     # training
     schema1 = _get_example_schema()
@@ -344,7 +344,7 @@ def test_validate_after_replacing_something_in_schema(

 @pytest.mark.parametrize("nlu, core", [(True, False), (False, True), (True, True)])
 def test_validate_after_adding_adding_default_parameter(
-    get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool,
+    get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool
 ):
     # create a schema and rely on rasa to fill in defaults later
     schema1 = _get_example_schema()
@@ -399,10 +399,7 @@ def test_validate_after_removing_or_adding_intent_or_action_name(
     core: bool,
     key: Text,
 ):
-    messages = [
-        Message(data={key: "item-1"}),
-        Message(data={key: "item-2"}),
-    ]
+    messages = [Message(data={key: "item-1"}), Message(data={key: "item-2"})]
     message_with_new_item = Message(data={key: "item-3"})

     # training
@@ -513,7 +510,7 @@ def test_validate_with_other_version(

 @pytest.mark.parametrize("nlu, core", [(True, False), (False, True), (True, True)])
 def test_validate_with_finetuning_fails_without_training(
-    get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool,
+    get_validation_method: Callable[..., ValidationMethodType], nlu: bool, core: bool
 ):
     validate = get_validation_method(finetuning=True, load=False, nlu=nlu, core=core)
     with pytest.raises(InvalidConfigException):
@@ -523,7 +520,7 @@ def test_loading_without_persisting(
     get_finetuning_validator: Callable[
         [bool, bool, Dict[Text, bool]], FinetuningValidator
-    ],
+    ]
 ):
     with pytest.raises(ValueError):
         get_finetuning_validator(finetuning=False, load=True, config={})
diff --git a/tests/nlu/classifiers/test_diet_classifier.py b/tests/nlu/classifiers/test_diet_classifier.py
index c9a49b80f59b..99f9d69cf39a 100644
--- a/tests/nlu/classifiers/test_diet_classifier.py
+++ b/tests/nlu/classifiers/test_diet_classifier.py
@@ -223,7 +223,7 @@ def test_compute_default_label_features():
     ],
 )
 def test_check_labels_features_exist(
-    messages: List[Message], expected: bool, create_diet: Callable[..., DIETClassifier],
+    messages: List[Message], expected: bool, create_diet: Callable[..., DIETClassifier]
 ):
     attribute = TEXT
     classifier = create_diet({})
@@ -242,7 +242,7 @@ def test_check_labels_features_exist(
                     ENTITIES: [
                         {"start": 0, "end": 4, "value": "test", "entity": "test"}
                     ],
-                },
+                }
             ),
             Message(
                 data={
@@ -251,15 +251,15 @@ def test_check_labels_features_exist(
                     ENTITIES: [
                         {"start": 0, "end": 4, "value": "test", "entity": "test"}
                     ],
-                },
+                }
             ),
         ],
         True,
     ),
     (
         [
-            Message(data={TEXT: "test a", INTENT: "intent a"},),
-            Message(data={TEXT: "test b", INTENT: "intent b"},),
+            Message(data={TEXT: "test a", INTENT: "intent a"}),
+            Message(data={TEXT: "test b", INTENT: "intent b"}),
         ],
         False,
     ),
@@ -341,7 +341,7 @@ async def test_train_persist_load_with_only_intent_classification(
     create_diet: Callable[..., DIETClassifier],
 ):
     create_train_load_and_process_diet(
-        {ENTITY_RECOGNITION: False, INTENT_CLASSIFICATION: True, EPOCHS: 1,},
+        {ENTITY_RECOGNITION: False, INTENT_CLASSIFICATION: True, EPOCHS: 1}
     )
     create_diet({MASKED_LM: True, EPOCHS: 1}, load=True, finetune=True)
@@ -450,16 +450,16 @@ async def test_set_random_seed(
     """test if train result is the same for two runs of tf embedding"""

     parsed_message1 = create_train_load_and_process_diet(
-        {ENTITY_RECOGNITION: False, RANDOM_SEED: 1, EPOCHS: 1},
+        {ENTITY_RECOGNITION: False, RANDOM_SEED: 1, EPOCHS: 1}
     )

     parsed_message2 = create_train_load_and_process_diet(
-        {ENTITY_RECOGNITION: False, RANDOM_SEED: 1, EPOCHS: 1},
+        {ENTITY_RECOGNITION: False, RANDOM_SEED: 1, EPOCHS: 1}
     )

     # Different random seed
     parsed_message3 = create_train_load_and_process_diet(
-        {ENTITY_RECOGNITION: False, RANDOM_SEED: 2, EPOCHS: 1},
+        {ENTITY_RECOGNITION: False, RANDOM_SEED: 2, EPOCHS: 1}
     )

     assert (
@@ -519,7 +519,7 @@ async def test_train_model_checkpointing(
     create_train_load_and_process_diet: Callable[..., Message],
 ):
     create_train_load_and_process_diet(
-        {EPOCHS: 2, EVAL_NUM_EPOCHS: 1, EVAL_NUM_EXAMPLES: 10, CHECKPOINT_MODEL: True},
+        {EPOCHS: 2, EVAL_NUM_EPOCHS: 1, EVAL_NUM_EXAMPLES: 10, CHECKPOINT_MODEL: True}
     )

     with default_model_storage.read_from(default_diet_resource) as model_dir:
@@ -686,12 +686,12 @@ async def test_adjusting_layers_incremental_training(
     train_load_and_process_diet: Callable[..., Message],
 ):
     """Tests adjusting sparse layers of `DIETClassifier` to increased sparse
-        feature sizes during incremental training.
+    feature sizes during incremental training.

-        Testing is done by checking the layer sizes.
-        Checking if they were replaced correctly is also important
-        and is done in `test_replace_dense_for_sparse_layers`
-        in `test_rasa_layers.py`.
+    Testing is done by checking the layer sizes.
+    Checking if they were replaced correctly is also important
+    and is done in `test_replace_dense_for_sparse_layers`
+    in `test_rasa_layers.py`.
     """
     iter1_data_path = "data/test_incremental_training/iter1/"
     iter2_data_path = "data/test_incremental_training/"
@@ -709,7 +709,7 @@ async def test_adjusting_layers_incremental_training(
     ]
     classifier = create_diet({EPOCHS: 1})
     processed_message = train_load_and_process_diet(
-        classifier, pipeline=pipeline, training_data=iter1_data_path,
+        classifier, pipeline=pipeline, training_data=iter1_data_path
     )

     old_data_signature = classifier.model.data_signature
@@ -739,7 +739,7 @@ async def test_adjusting_layers_incremental_training(
     finetune_classifier = create_diet({EPOCHS: 1}, load=True, finetune=True)
     assert finetune_classifier.finetune_mode
     processed_message_finetuned = train_load_and_process_diet(
-        finetune_classifier, pipeline=pipeline, training_data=iter2_data_path,
+        finetune_classifier, pipeline=pipeline, training_data=iter2_data_path
     )

     new_sparse_feature_sizes = processed_message_finetuned.get_sparse_feature_sizes(
@@ -855,9 +855,7 @@ async def test_sparse_feature_sizes_decreased_incremental_training(
     classifier = create_diet({EPOCHS: 1})
     assert not classifier.finetune_mode
-    train_load_and_process_diet(
-        classifier, pipeline=pipeline, training_data=iter1_path,
-    )
+    train_load_and_process_diet(classifier, pipeline=pipeline, training_data=iter1_path)

     finetune_classifier = create_diet({EPOCHS: 1}, load=True, finetune=True)
     assert finetune_classifier.finetune_mode
@@ -865,10 +863,10 @@ async def test_sparse_feature_sizes_decreased_incremental_training(
     if should_raise_exception:
         with pytest.raises(Exception) as exec_info:
             train_load_and_process_diet(
-                finetune_classifier, pipeline=pipeline, training_data=iter2_path,
+                finetune_classifier, pipeline=pipeline, training_data=iter2_path
             )
         assert "Sparse feature sizes have decreased" in str(exec_info.value)
     else:
         train_load_and_process_diet(
-            finetune_classifier, pipeline=pipeline, training_data=iter2_path,
+            finetune_classifier, pipeline=pipeline, training_data=iter2_path
         )
diff --git a/tests/nlu/classifiers/test_fallback_classifier.py b/tests/nlu/classifiers/test_fallback_classifier.py
index a3e80538b9ac..9262726836a9 100644
--- a/tests/nlu/classifiers/test_fallback_classifier.py
+++ b/tests/nlu/classifiers/test_fallback_classifier.py
@@ -185,7 +185,7 @@ def test_not_predict_fallback_intent(


 def test_defaults(
-    default_model_storage: ModelStorage, default_execution_context: ExecutionContext,
+    default_model_storage: ModelStorage, default_execution_context: ExecutionContext
 ):
     classifier = create_fallback_classifier(
         {}, default_model_storage, default_execution_context
diff --git a/tests/nlu/classifiers/test_keyword_classifier.py b/tests/nlu/classifiers/test_keyword_classifier.py
index b2df98b30808..d6d285736ae9 100644
--- a/tests/nlu/classifiers/test_keyword_classifier.py
+++ b/tests/nlu/classifiers/test_keyword_classifier.py
@@ -22,7 +22,7 @@ def training_data(nlu_as_json_path: Text):

 @pytest.fixture()
 def default_keyword_intent_classifier(
-    default_model_storage: ModelStorage, default_execution_context: ExecutionContext,
+    default_model_storage: ModelStorage, default_execution_context: ExecutionContext
 ):
     return KeywordIntentClassifier.create(
         KeywordIntentClassifier.get_default_config(),
@@ -42,12 +42,12 @@ def test_persist_and_load(
     default_execution_context: ExecutionContext,
 ):
     classifier = KeywordIntentClassifier.create(
-        config, default_model_storage, Resource("keyword"), default_execution_context,
+        config, default_model_storage, Resource("keyword"), default_execution_context
     )
     classifier.train(training_data)

     loaded_classifier = KeywordIntentClassifier.load(
-        config, default_model_storage, Resource("keyword"), default_execution_context,
+        config, default_model_storage, Resource("keyword"), default_execution_context
     )

     predicted = copy.copy(training_data)
@@ -85,7 +85,7 @@ def test_classification(
         assert m.get("intent").get("name", "NOT_CLASSIFIED") == intent


-def test_valid_data(default_keyword_intent_classifier: KeywordIntentClassifier,):
+def test_valid_data(default_keyword_intent_classifier: KeywordIntentClassifier):
     json_data = {
         "rasa_nlu_data": {
             "common_examples": [
@@ -106,7 +106,7 @@ def test_valid_data(default_keyword_intent_classifier: KeywordIntentClassifier,)


 @pytest.mark.filterwarnings("ignore:Keyword.* of keywords:UserWarning")
-def test_identical_data(default_keyword_intent_classifier: KeywordIntentClassifier,):
+def test_identical_data(default_keyword_intent_classifier: KeywordIntentClassifier):
     json_data = {
         "rasa_nlu_data": {
             "common_examples": [
@@ -128,7 +128,7 @@ def test_identical_data(default_keyword_intent_classifi


 @pytest.mark.filterwarnings("ignore:Keyword.* of keywords:UserWarning")
-def test_ambiguous_data(default_keyword_intent_classifier: KeywordIntentClassifier,):
+def test_ambiguous_data(default_keyword_intent_classifier: KeywordIntentClassifier):
     json_data = {
         "rasa_nlu_data": {
             "common_examples": [
diff --git a/tests/nlu/classifiers/test_regex_message_handler.py b/tests/nlu/classifiers/test_regex_message_handler.py
index 14d990d44db1..fd07337f30a8 100644
--- a/tests/nlu/classifiers/test_regex_message_handler.py
+++ b/tests/nlu/classifiers/test_regex_message_handler.py
@@ -11,12 +11,7 @@ from rasa.nlu.classifiers.regex_message_handler import RegexMessageHandler
 import pytest
 from rasa.shared.constants import INTENT_MESSAGE_PREFIX
-from rasa.shared.nlu.constants import (
-    FEATURE_TYPE_SENTENCE,
-    INTENT,
-    TEXT,
-    ENTITIES,
-)
+from rasa.shared.nlu.constants import FEATURE_TYPE_SENTENCE, INTENT, TEXT, ENTITIES


 @pytest.fixture
diff --git a/tests/nlu/classifiers/test_sklearn_classifier.py b/tests/nlu/classifiers/test_sklearn_classifier.py
index 23bf14293c59..3233de4c4730 100644
--- a/tests/nlu/classifiers/test_sklearn_classifier.py
+++ b/tests/nlu/classifiers/test_sklearn_classifier.py
@@ -23,7 +23,7 @@ def training_data(nlu_data_path: Text) -> TrainingData:

 @pytest.fixture()
 def default_sklearn_intent_classifier(
-    default_model_storage: ModelStorage, default_execution_context: ExecutionContext,
+    default_model_storage: ModelStorage, default_execution_context: ExecutionContext
 ):
     return SklearnIntentClassifier.create(
         SklearnIntentClassifier.get_default_config(),
@@ -47,7 +47,7 @@ def test_persist_and_load(
     )

     training_data, loaded_pipeline = train_and_preprocess(
-        pipeline=[{"component": SpacyTokenizer}, {"component": SpacyFeaturizer},],
+        pipeline=[{"component": SpacyTokenizer}, {"component": SpacyFeaturizer}],
         training_data=training_data,
     )
     default_sklearn_intent_classifier.train(training_data)
diff --git a/tests/nlu/conftest.py b/tests/nlu/conftest.py
index 0a305e93589c..da38d044ac8d 100644
--- a/tests/nlu/conftest.py
+++ b/tests/nlu/conftest.py
@@ -36,7 +36,7 @@ def train_and_preprocess(
     default_model_storage: ModelStorage,
 ) -> Callable[..., Tuple[TrainingData, List[GraphComponent]]]:
     def inner(
-        pipeline: List[Dict[Text, Any]], training_data: Union[Text, TrainingData],
+        pipeline: List[Dict[Text, Any]], training_data: Union[Text, TrainingData]
     ) -> Tuple[TrainingData, List[GraphComponent]]:

         if isinstance(training_data, str):
@@ -73,8 +73,8 @@ def create_component(

 @pytest.fixture()
-def process_message(default_model_storage: ModelStorage,) -> Callable[..., Message]:
-    def inner(loaded_pipeline: List[GraphComponent], message: Message,) -> Message:
+def process_message(default_model_storage: ModelStorage) -> Callable[..., Message]:
+    def inner(loaded_pipeline: List[GraphComponent], message: Message) -> Message:

         for component in loaded_pipeline:
             component.process([message])
@@ -91,9 +91,7 @@ def spacy_tokenizer() -> SpacyTokenizer:

 @pytest.fixture()
 def spacy_featurizer() -> SpacyFeaturizer:
-    return SpacyFeaturizer(
-        SpacyFeaturizer.get_default_config(), name="SpacyFeaturizer",
-    )
+    return SpacyFeaturizer(SpacyFeaturizer.get_default_config(), name="SpacyFeaturizer")


 @pytest.fixture()
diff --git a/tests/nlu/extractors/test_duckling_entity_extractor.py b/tests/nlu/extractors/test_duckling_entity_extractor.py
index babd487191f0..578e42fdfe61 100644
--- a/tests/nlu/extractors/test_duckling_entity_extractor.py
+++ b/tests/nlu/extractors/test_duckling_entity_extractor.py
@@ -13,7 +13,7 @@

 @pytest.fixture()
 def create_duckling(
-    default_model_storage: ModelStorage, default_execution_context: ExecutionContext,
+    default_model_storage: ModelStorage, default_execution_context: ExecutionContext
 ) -> Callable[[Dict[Text, Any]], DucklingEntityExtractor]:
     def inner(config: Dict[Text, Any]) -> DucklingEntityExtractor:
         return DucklingEntityExtractor.create(
diff --git a/tests/nlu/extractors/test_extractor.py b/tests/nlu/extractors/test_extractor.py
index 888e9bdeecae..edbba25ec3b3 100644
--- a/tests/nlu/extractors/test_extractor.py
+++ b/tests/nlu/extractors/test_extractor.py
@@ -252,9 +252,9 @@ def test_convert_tags_to_entities(
                 "address",
                 "address",
                 "address",
-            ],
+            ]
         },
-        {"entity": [1.0, 1.0, 1.0, 1.0, 0.98, 0.78, 1.0, 0.89, 1.0, 1.0, 1.0],},
+        {"entity": [1.0, 1.0, 1.0, 1.0, 0.98, 0.78, 1.0, 0.89, 1.0, 1.0, 1.0]},
         [
             {
                 "entity": "address",
diff --git a/tests/nlu/extractors/test_mitie_entity_extractor.py b/tests/nlu/extractors/test_mitie_entity_extractor.py
index ec533064f0f0..d126a3ed4f7e 100644
--- a/tests/nlu/extractors/test_mitie_entity_extractor.py
+++ b/tests/nlu/extractors/test_mitie_entity_extractor.py
@@ -61,7 +61,7 @@ def inner(config: Dict[Text, Any], load: bool = False) -> MitieEntityExtractor:
             model_storage=default_model_storage,
             execution_context=default_execution_context,
             resource=Resource("MitieEntityExtractor"),
-            config={**MitieEntityExtractor.get_default_config(), **config,},
+            config={**MitieEntityExtractor.get_default_config(), **config},
         )

     return inner
diff --git a/tests/nlu/extractors/test_regex_entity_extractor.py b/tests/nlu/extractors/test_regex_entity_extractor.py
index 94423c4cad38..4b4346957b06 100644
--- a/tests/nlu/extractors/test_regex_entity_extractor.py
+++ b/tests/nlu/extractors/test_regex_entity_extractor.py
@@ -116,7 +116,7 @@ def inner(config: Dict[Text, Any], load: bool = False) -> RegexEntityExtractor:
     (
         {"use_word_boundaries": False},
         "北京和上海都是大城市。",
-        [{"name": "city", "elements": ["北京", "上海", "广州", "深圳", "杭州"],}],
+        [{"name": "city", "elements": ["北京", "上海", "广州", "深圳", "杭州"]}],
         [
             {
                 ENTITY_ATTRIBUTE_TYPE: "city",
@@ -139,7 +139,7 @@ def inner(config: Dict[Text, Any], load: bool = False) -> RegexEntityExtractor:
         {"use_word_boundaries": False},
         "小明正要去北京拜访老李。",
         [
-            {"name": "city", "elements": ["北京", "上海", "广州", "深圳", "杭州"],},
+            {"name": "city", "elements": ["北京", "上海", "广州", "深圳", "杭州"]},
             {"name": "person", "elements": ["小明", "小红", "小王", "小李"]},
         ],
         [
@@ -164,7 +164,7 @@ def inner(config: Dict[Text, Any], load: bool = False) -> RegexEntityExtractor:
         {"use_word_boundaries": False},
         "Rasa 真好用。",
         [
-            {"name": "city", "elements": ["北京", "上海", "广州", "深圳", "杭州"],},
+            {"name": "city", "elements": ["北京", "上海", "广州", "深圳", "杭州"]},
             {"name": "person", "elements": ["小明", "小红", "小王", "小李"]},
         ],
         [],
@@ -264,7 +264,7 @@ def test_train_and_process(


 def test_train_process_and_load_with_empty_model(
-    create_or_load_extractor: Callable[..., RegexEntityExtractor],
+    create_or_load_extractor: Callable[..., RegexEntityExtractor]
 ):
     extractor = create_or_load_extractor({})
     with pytest.warns(UserWarning):
@@ -276,7 +276,7 @@ def test_train_process_and_load_with_empty_model(


 def test_process_does_not_overwrite_any_entities(
-    create_or_load_extractor: Callable[..., RegexEntityExtractor],
+    create_or_load_extractor: Callable[..., RegexEntityExtractor]
 ):

     pre_existing_entity = {
diff --git a/tests/nlu/featurizers/test_convert_featurizer.py b/tests/nlu/featurizers/test_convert_featurizer.py
index e75abf39df7d..251d1371b803 100644
--- a/tests/nlu/featurizers/test_convert_featurizer.py
+++ b/tests/nlu/featurizers/test_convert_featurizer.py
@@ -27,7 +27,7 @@

 @pytest.fixture
 def create_or_load_convert_featurizer(
-    default_model_storage: ModelStorage, default_execution_context: ExecutionContext,
+    default_model_storage: ModelStorage, default_execution_context: ExecutionContext
 ) -> Callable[[Dict[Text, Any], bool], ConveRTFeaturizer]:
     def inner(
         config: Dict[Text, Any], load: bool = False
@@ -54,7 +54,7 @@ def test_convert_featurizer_process(
 ):

     monkeypatch.setattr(
-        ConveRTFeaturizer, "_validate_model_url", lambda _: RESTRICTED_ACCESS_URL,
+        ConveRTFeaturizer, "_validate_model_url", lambda _: RESTRICTED_ACCESS_URL
     )
     component_config = {
         FEATURIZER_CLASS_ALIAS: "alias",
@@ -94,9 +94,7 @@ def test_convert_featurizer_train(
     whitespace_tokenizer: WhitespaceTokenizer,
 ):

-    monkeypatch.setattr(
-        ConveRTFeaturizer, "_validate_model_url", lambda _: None,
-    )
+    monkeypatch.setattr(ConveRTFeaturizer, "_validate_model_url", lambda _: None)
     component_config = {
         FEATURIZER_CLASS_ALIAS: "alias",
         "model_url": RESTRICTED_ACCESS_URL,
@@ -165,9 +163,7 @@ def test_convert_featurizer_tokens_to_text(
     whitespace_tokenizer: WhitespaceTokenizer,
 ):

-    monkeypatch.setattr(
-        ConveRTFeaturizer, "_validate_model_url", lambda _: None,
-    )
+    monkeypatch.setattr(ConveRTFeaturizer, "_validate_model_url", lambda _: None)
     component_config = {
         FEATURIZER_CLASS_ALIAS: "alias",
         "model_url": RESTRICTED_ACCESS_URL,
@@ -208,9 +204,7 @@ def test_convert_featurizer_token_edge_cases(
     whitespace_tokenizer: WhitespaceTokenizer,
 ):

-    monkeypatch.setattr(
-        ConveRTFeaturizer, "_validate_model_url", lambda _: None,
-    )
+    monkeypatch.setattr(ConveRTFeaturizer, "_validate_model_url", lambda _: None)
     component_config = {
         FEATURIZER_CLASS_ALIAS: "alias",
         "model_url": RESTRICTED_ACCESS_URL,
@@ -239,9 +233,7 @@ def test_convert_featurizer_number_of_sub_tokens(
     whitespace_tokenizer: WhitespaceTokenizer,
 ):

-    monkeypatch.setattr(
-        ConveRTFeaturizer, "_validate_model_url", lambda _: None,
-    )
+    monkeypatch.setattr(ConveRTFeaturizer, "_validate_model_url", lambda _: None)
     component_config = {
         FEATURIZER_CLASS_ALIAS: "alias",
         "model_url": RESTRICTED_ACCESS_URL,
@@ -319,7 +311,7 @@ def test_raise_wrong_model_file(

 @pytest.mark.skip_on_windows
 def test_raise_invalid_path(
-    create_or_load_convert_featurizer: Callable[[Dict[Text, Any]], ConveRTFeaturizer],
+    create_or_load_convert_featurizer: Callable[[Dict[Text, Any]], ConveRTFeaturizer]
 ):
     component_config = {FEATURIZER_CLASS_ALIAS: "alias", "model_url": "saved_model.pb"}
diff --git a/tests/nlu/featurizers/test_count_vectors_featurizer.py b/tests/nlu/featurizers/test_count_vectors_featurizer.py
index d5e189f3b4c3..11d1b3531d60 100644
--- a/tests/nlu/featurizers/test_count_vectors_featurizer.py
+++ b/tests/nlu/featurizers/test_count_vectors_featurizer.py
@@ -240,7 +240,7 @@ def test_count_vector_featurizer_shared_vocab(
     create_featurizer: Callable[..., CountVectorsFeaturizer],
     whitespace_tokenizer: WhitespaceTokenizer,
 ):
-    ftr = create_featurizer({"use_shared_vocab": True,})
+    ftr = create_featurizer({"use_shared_vocab": True})

     train_message = Message(data={TEXT: sentence})
     # this is needed for a valid training example
@@ -323,7 +323,7 @@ def test_count_vector_featurizer_oov_words(
     whitespace_tokenizer: WhitespaceTokenizer,
 ):
     ftr = create_featurizer(
-        {"OOV_token": "__oov__", "OOV_words": ["oov_word0", "OOV_word1"],}
+        {"OOV_token": "__oov__", "OOV_words": ["oov_word0", "OOV_word1"]}
     )
     train_message = Message(data={TEXT: sentence})
     whitespace_tokenizer.process([train_message])
@@ -397,7 +397,7 @@ def test_count_vector_featurizer_char(
     create_featurizer: Callable[..., CountVectorsFeaturizer],
     whitespace_tokenizer: WhitespaceTokenizer,
 ):
-    ftr = create_featurizer({"min_ngram": 1, "max_ngram": 2, "analyzer": "char",})
+    ftr = create_featurizer({"min_ngram": 1, "max_ngram": 2, "analyzer": "char"})

     train_message = Message(data={TEXT: sentence})
     whitespace_tokenizer.process([train_message])
@@ -621,7 +621,7 @@ def test_count_vector_featurizer_action_attribute_featurization(
     create_featurizer: Callable[..., CountVectorsFeaturizer],
     whitespace_tokenizer: WhitespaceTokenizer,
 ):
-    ftr = create_featurizer({"token_pattern": r"(?u)\b\w+\b",})
+    ftr = create_featurizer({"token_pattern": r"(?u)\b\w+\b"})

     train_message = Message(data={TEXT: sentence})
     # this is needed for a valid training example
@@ -687,7 +687,7 @@ def test_count_vector_featurizer_process_by_attribute(
     create_featurizer: Callable[..., CountVectorsFeaturizer],
     whitespace_tokenizer: WhitespaceTokenizer,
 ):
-    ftr = create_featurizer({"token_pattern": r"(?u)\b\w+\b",})
+    ftr = create_featurizer({"token_pattern": r"(?u)\b\w+\b"})

     # add a second example that has some response, so that the vocabulary for
     # response exists
@@ -773,7 +773,7 @@ def test_cvf_incremental_training(

 @pytest.mark.parametrize(
     "initial_train_text, additional_train_text, " "use_shared_vocab",
-    [("am I the coolest person?", "no", True), ("rasa rasa", "sara sara", False),],
+    [("am I the coolest person?", "no", True), ("rasa rasa", "sara sara", False)],
 )
 def test_use_shared_vocab_exception(
     initial_train_text: Text,
diff --git a/tests/nlu/featurizers/test_featurizer.py b/tests/nlu/featurizers/test_featurizer.py
index a94b5f7c71bb..4513ac2c1d79 100644
--- a/tests/nlu/featurizers/test_featurizer.py
+++ b/tests/nlu/featurizers/test_featurizer.py
@@ -39,8 +39,8 @@
             np.array([[0.0, 0.0, 0.0, 0.0]]),
         ),
         # "max" - special cases to be aware of
-        ("max", np.array([[-1.0], [0.0]]), False, np.array([[0.0]]),),
-        ("max", np.array([[-1.0], [0.0]]), True, np.array([[-1.0]]),),
+        ("max", np.array([[-1.0], [0.0]]), False, np.array([[0.0]])),
+        ("max", np.array([[-1.0], [0.0]]), True, np.array([[-1.0]])),
     ],
 )
 def test_calculate_cls_vector(pooling, features, only_non_zero_vectors, expected):
diff --git a/tests/nlu/featurizers/test_lexical_syntactic_featurizer.py b/tests/nlu/featurizers/test_lexical_syntactic_featurizer.py
index 48f586694d76..499e1f8862e2 100644
--- a/tests/nlu/featurizers/test_lexical_syntactic_featurizer.py
+++ b/tests/nlu/featurizers/test_lexical_syntactic_featurizer.py
@@ -36,7 +36,7 @@ def create_lexical_syntactic_featurizer(
 ) -> Callable[[Dict[Text, Any]], LexicalSyntacticFeaturizer]:
     def inner(config: Dict[Text, Any]):
         return LexicalSyntacticFeaturizer.create(
-            config={**LexicalSyntacticFeaturizer.get_default_config(), **config,},
+            config={**LexicalSyntacticFeaturizer.get_default_config(), **config},
             model_storage=default_model_storage,
             execution_context=default_execution_context,
             resource=resource_lexical_syntactic_featurizer,
@@ -52,7 +52,7 @@ def inner(config: Dict[Text, Any]):
         (
             "hello goodbye hello",
             None,
-            [["BOS", "upper"], ["BOS", "EOS", "prefix2", "digit"], ["EOS", "low"],],
+            [["BOS", "upper"], ["BOS", "EOS", "prefix2", "digit"], ["EOS", "low"]],
             [
                 [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0],
                 [0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0],
@@ -63,7 +63,7 @@ def inner(config: Dict[Text, Any]):
         (
             "a 1",
             None,
-            [["BOS", "upper"], ["BOS", "EOS", "prefix2", "digit"], ["EOS", "low"],],
+            [["BOS", "upper"], ["BOS", "EOS", "prefix2", "digit"], ["EOS", "low"]],
             [
                 [0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0],
                 [1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0],
@@ -145,7 +145,7 @@ def test_feature_computation(
 def test_features_for_messages_with_missing_part_of_speech_tags(
     create_lexical_syntactic_featurizer: Callable[
         [Dict[Text, Any]], LexicalSyntacticFeaturizer
-    ],
+    ]
 ):
     # build the message and do NOT add part of speech information
     sentence = "hello goodbye hello"
@@ -170,7 +170,7 @@ def test_features_for_messages_with_missing_part_of_speech_tags(
 def test_only_featurizes_text_attribute(
     create_lexical_syntactic_featurizer: Callable[
         [Dict[Text, Any]], LexicalSyntacticFeaturizer
-    ],
+    ]
 ):
     # build a message with tokens for lots of attributes
     sentence = "hello goodbye hello"
@@ -197,7 +197,7 @@ def test_only_featurizes_text_attribute(
 def test_process_multiple_messages(
     create_lexical_syntactic_featurizer: Callable[
         [Dict[Text, Any]], LexicalSyntacticFeaturizer
-    ],
+    ]
 ):
     # build a message with tokens for lots of attributes
     multiple_messages = []
@@ -249,7 +249,7 @@ def test_create_train_load_and_process(
     featurizer.train(TrainingData([message]))

     loaded_featurizer = LexicalSyntacticFeaturizer.load(
-        config={**LexicalSyntacticFeaturizer.get_default_config(), **config,},
+        config={**LexicalSyntacticFeaturizer.get_default_config(), **config},
         model_storage=default_model_storage,
         execution_context=default_execution_context,
         resource=resource_lexical_syntactic_featurizer,
@@ -287,7 +287,7 @@ def test_validate_config(config: Dict[Text, Any], raises: bool):

 @pytest.mark.parametrize(
     "sentence, feature_config, expected_features",
-    [("The sun is shining", [["pos", "pos2"]], np.ones(shape=(4, 2)),),],
+    [("The sun is shining", [["pos", "pos2"]], np.ones(shape=(4, 2)))],
 )
 def test_warn_if_part_of_speech_features_cannot_be_computed(
     create_lexical_syntactic_featurizer: Callable[
diff --git a/tests/nlu/featurizers/test_lm_featurizer.py b/tests/nlu/featurizers/test_lm_featurizer.py
index fa430ae10b70..66f5f72aa254 100644
--- a/tests/nlu/featurizers/test_lm_featurizer.py
+++ b/tests/nlu/featurizers/test_lm_featurizer.py
@@ -11,10 +11,7 @@
 from rasa.engine.graph import ExecutionContext
 from rasa.engine.storage.storage import ModelStorage
 from rasa.engine.storage.resource import Resource
-from rasa.nlu.constants import (
-    TOKENS_NAMES,
-    NUMBER_OF_SUB_TOKENS,
-)
+from rasa.nlu.constants import TOKENS_NAMES, NUMBER_OF_SUB_TOKENS
 from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
 from rasa.shared.nlu.training_data.training_data import TrainingData
 from rasa.shared.nlu.training_data.message import Message
@@ -36,7 +33,7 @@ def create_language_model_featurizer(
 ) -> Callable[[Dict[Text, Any]], LanguageModelFeaturizer]:
     def inner(config: Dict[Text, Any]) -> LanguageModelFeaturizer:
         return LanguageModelFeaturizer.create(
-            config={**LanguageModelFeaturizer.get_default_config(), **config,},
+            config={**LanguageModelFeaturizer.get_default_config(), **config},
             model_storage=default_model_storage,
             resource=resource_language_model_featurizer,
             execution_context=default_execution_context,
@@ -90,7 +87,7 @@ def process_training_text(
     ],
     whitespace_tokenizer: WhitespaceTokenizer,
 ) -> List[Message]:
-    """ Creates a featurizer and process training data """
+    """Creates a featurizer and processes training data"""
     config = create_pretrained_transformers_config(model_name, model_weights)
     lm_featurizer = create_language_model_featurizer(config)
@@ -111,7 +108,7 @@ def process_messages(
     ],
     whitespace_tokenizer: WhitespaceTokenizer,
 ) -> List[Message]:
-    """ Creates a featurizer and processes messages """
+    """Creates a featurizer and processes messages"""
     config = create_pretrained_transformers_config(model_name, model_weights)
     lm_featurizer = create_language_model_featurizer(config)
@@ -554,7 +551,7 @@ def check_subtokens(
     expected_number_of_sub_tokens: List[List[float]],
     whitespace_tokenizer: WhitespaceTokenizer,
 ):
-    """ Checks that we get the correct number of sub tokens """
+    """Checks that we get the correct number of sub tokens"""
     for index, message in enumerate(messages):
         assert [
             t.get(NUMBER_OF_SUB_TOKENS) for t in message.get(TOKENS_NAMES[TEXT])
@@ -576,7 +573,7 @@ def test_lm_featurizer_num_sub_tokens_process_training_data(
     whitespace_tokenizer: WhitespaceTokenizer,
 ):
     """Tests the number of sub tokens when calling the function
-    process training data """
+    process training data"""
     messages = process_training_text(
         texts,
         model_name,
@@ -601,7 +598,7 @@ def test_lm_featurizer_num_sub_tokens_process_messages(
     whitespace_tokenizer: WhitespaceTokenizer,
 ):
     """Tests the number of sub tokens when calling the function
-    process (messages) """
+    process (messages)"""
     messages = process_messages(
         texts,
         model_name,
@@ -627,9 +624,7 @@ def test_sequence_length_overflow_train(
     ],
     monkeypatch: MonkeyPatch,
 ):
-    monkeypatch.setattr(
-        LanguageModelFeaturizer, "_load_model_instance", lambda _: None,
-    )
+    monkeypatch.setattr(LanguageModelFeaturizer, "_load_model_instance", lambda _: None)
     component = create_language_model_featurizer({"model_name": model_name})
     message = Message.build(text=" ".join(["hi"] * input_sequence_length))
     if should_overflow:
@@ -661,9 +656,7 @@ def test_long_sequences_extra_padding(
     ],
     monkeypatch: MonkeyPatch,
 ):
-    monkeypatch.setattr(
-        LanguageModelFeaturizer, "_load_model_instance", lambda _: None,
-    )
+    monkeypatch.setattr(LanguageModelFeaturizer, "_load_model_instance", lambda _: None)
     component = create_language_model_featurizer({"model_name": model_name})
     modified_sequence_embeddings = component._add_extra_padding(
         sequence_embeddings, actual_sequence_lengths
@@ -700,9 +693,7 @@ def test_input_padding(
     ],
     monkeypatch: MonkeyPatch,
 ):
-    monkeypatch.setattr(
-        LanguageModelFeaturizer, "_load_model_instance", lambda _: None,
-    )
+    monkeypatch.setattr(LanguageModelFeaturizer, "_load_model_instance", lambda _: None)
component = create_language_model_featurizer({"model_name": "bert"}) component.pad_token_id = 0 padded_input = component._add_padding_to_batch(token_ids, max_sequence_length_model) @@ -758,9 +749,7 @@ def test_attention_mask( ], monkeypatch: MonkeyPatch, ): - monkeypatch.setattr( - LanguageModelFeaturizer, "_load_model_instance", lambda _: None, - ) + monkeypatch.setattr(LanguageModelFeaturizer, "_load_model_instance", lambda _: None) component = create_language_model_featurizer({"model_name": "bert"}) attention_mask = component._compute_attention_mask( @@ -792,10 +781,7 @@ def test_lm_featurizer_correctly_handle_whitespace_token( ], ): - config = { - "model_name": "bert", - "model_weights": "bert-base-chinese", - } + config = {"model_name": "bert", "model_weights": "bert-base-chinese"} lm_featurizer = create_language_model_featurizer(config) diff --git a/tests/nlu/featurizers/test_mitie_featurizer.py b/tests/nlu/featurizers/test_mitie_featurizer.py index 421067a19673..d68ea0151972 100644 --- a/tests/nlu/featurizers/test_mitie_featurizer.py +++ b/tests/nlu/featurizers/test_mitie_featurizer.py @@ -43,7 +43,7 @@ def create( ) -> Callable[[Dict[Text, Any]], MitieFeaturizer]: def inner(config: Dict[Text, Any]): return MitieFeaturizer.create( - config={**MitieFeaturizer.get_default_config(), **config,}, + config={**MitieFeaturizer.get_default_config(), **config}, model_storage=default_model_storage, execution_context=default_execution_context, resource=resource, diff --git a/tests/nlu/featurizers/test_regex_featurizer.py b/tests/nlu/featurizers/test_regex_featurizer.py index 384de53a76d4..e6e955418d61 100644 --- a/tests/nlu/featurizers/test_regex_featurizer.py +++ b/tests/nlu/featurizers/test_regex_featurizer.py @@ -73,13 +73,13 @@ def inner( ), ( "blah balh random eh", - [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0],], + [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [0.0, 0.0, 0.0], [], ), ( "a 1 digit number", - [[0.0, 0.0, 0.0], [1.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0],], + [[0.0, 0.0, 0.0], [1.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [1.0, 0.0, 1.0], [1, 1], ), @@ -176,8 +176,8 @@ def test_lookup_tables_without_use_word_boundaries( from rasa.nlu.tokenizers.tokenizer import Token lookups = [ - {"name": "cites", "elements": ["北京", "上海", "广州", "深圳", "杭州"],}, - {"name": "dates", "elements": ["昨天", "今天", "明天", "后天"],}, + {"name": "cites", "elements": ["北京", "上海", "广州", "深圳", "杭州"]}, + {"name": "dates", "elements": ["昨天", "今天", "明天", "后天"]}, ] ftr = create_featurizer({"use_word_boundaries": False}) training_data = TrainingData() @@ -221,7 +221,7 @@ def test_lookup_tables_without_use_word_boundaries( ), ( "Is burrito my favorite food?", - [[0.0, 0.0], [0.0, 1.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0],], + [[0.0, 0.0], [0.0, 1.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], [0.0, 1.0], [1.0], ), @@ -393,9 +393,7 @@ def test_regex_featurizer_case_sensitive( {"pattern": "\\bhey*", "name": "hello", "usage": "intent"}, {"pattern": "[0-1]+", "name": "binary", "usage": "intent"}, ] - ftr = create_featurizer( - {"case_sensitive": case_sensitive}, known_patterns=patterns, - ) + ftr = create_featurizer({"case_sensitive": case_sensitive}, known_patterns=patterns) # adds tokens to the message message = Message(data={TEXT: sentence}) diff --git a/tests/nlu/featurizers/test_spacy_featurizer.py b/tests/nlu/featurizers/test_spacy_featurizer.py index b45071b13183..7bbef27bd60b 100644 --- a/tests/nlu/featurizers/test_spacy_featurizer.py 
+++ b/tests/nlu/featurizers/test_spacy_featurizer.py @@ -14,7 +14,7 @@ def create_spacy_featurizer(config: Dict[Text, Any]) -> SpacyFeaturizer: return SpacyFeaturizer( - {**SpacyFeaturizer.get_default_config(), **config}, "spacy_featurizer", + {**SpacyFeaturizer.get_default_config(), **config}, "spacy_featurizer" ) diff --git a/tests/nlu/selectors/test_selectors.py b/tests/nlu/selectors/test_selectors.py index d08f5b2054e6..6d28026278b8 100644 --- a/tests/nlu/selectors/test_selectors.py +++ b/tests/nlu/selectors/test_selectors.py @@ -150,7 +150,7 @@ def inner( "config_params", [ {EPOCHS: 1}, - {EPOCHS: 1, MASKED_LM: True, TRANSFORMER_SIZE: 256, NUM_TRANSFORMER_LAYERS: 1,}, + {EPOCHS: 1, MASKED_LM: True, TRANSFORMER_SIZE: 256, NUM_TRANSFORMER_LAYERS: 1}, ], ) def test_train_selector( @@ -244,7 +244,7 @@ def test_ground_truth_for_training( create_response_selector: Callable[[Dict[Text, Any]], ResponseSelector], ): response_selector = create_response_selector( - {"use_text_as_label": use_text_as_label}, + {"use_text_as_label": use_text_as_label} ) response_selector.preprocess_train_data(response_selector_training_data) @@ -269,7 +269,7 @@ def test_resolve_intent_response_key_from_label( create_response_selector: Callable[[Dict[Text, Any]], ResponseSelector], ): - response_selector = create_response_selector({"use_text_as_label": train_on_text},) + response_selector = create_response_selector({"use_text_as_label": train_on_text}) response_selector.preprocess_train_data(response_selector_training_data) label_intent_response_key = response_selector._resolve_intent_response_key( @@ -347,13 +347,9 @@ def test_train_persist_load( ] config_params = {EPOCHS: 1} - train_persist_load_with_different_settings( - pipeline, config_params, False, - ) + train_persist_load_with_different_settings(pipeline, config_params, False) - train_persist_load_with_different_settings( - pipeline, config_params, True, - ) + train_persist_load_with_different_settings(pipeline, config_params, True) async def test_process_gives_diagnostic_data( @@ -406,7 +402,7 @@ async def test_process_gives_diagnostic_data( @pytest.mark.parametrize( - "classifier_params", [({LOSS_TYPE: "margin", RANDOM_SEED: 42, EPOCHS: 1})], + "classifier_params", [({LOSS_TYPE: "margin", RANDOM_SEED: 42, EPOCHS: 1})] ) async def test_margin_loss_is_not_normalized( classifier_params: Dict[Text, int], @@ -599,7 +595,7 @@ def test_sets_integer_transformer_size_when_needed( ) -def test_transformer_size_gets_corrected(train_persist_load_with_different_settings,): +def test_transformer_size_gets_corrected(train_persist_load_with_different_settings): """Tests that the default value of `transformer_size` which is `None` is corrected if transformer layers are enabled in `ResponseSelector`. """ @@ -610,7 +606,7 @@ def test_transformer_size_gets_corrected(train_persist_load_with_different_setti config_params = {EPOCHS: 1, NUM_TRANSFORMER_LAYERS: 1} selector = train_persist_load_with_different_settings( - pipeline, config_params, False, + pipeline, config_params, False ) assert selector.component_config[TRANSFORMER_SIZE] == DEFAULT_TRANSFORMER_SIZE @@ -623,12 +619,12 @@ async def test_adjusting_layers_incremental_training( process_message: Callable[..., Message], ): """Tests adjusting sparse layers of `ResponseSelector` to increased sparse - feature sizes during incremental training. + feature sizes during incremental training. - Testing is done by checking the layer sizes. 
- Checking if they were replaced correctly is also important - and is done in `test_replace_dense_for_sparse_layers` - in `test_rasa_layers.py`. + Testing is done by checking the layer sizes. + Checking if they were replaced correctly is also important + and is done in `test_replace_dense_for_sparse_layers` + in `test_rasa_layers.py`. """ iter1_data_path = "data/test_incremental_training/iter1/" iter2_data_path = "data/test_incremental_training/" diff --git a/tests/nlu/test_evaluation.py b/tests/nlu/test_evaluation.py index fe5892c54597..748186bcbf88 100644 --- a/tests/nlu/test_evaluation.py +++ b/tests/nlu/test_evaluation.py @@ -220,7 +220,7 @@ def test_determine_token_labels_with_extractors(): label = determine_token_labels( CH_correct_segmentation[0], [CH_correct_entity, CH_wrong_entity], - {SpacyEntityExtractor.__name__, MitieEntityExtractor.__name__,}, + {SpacyEntityExtractor.__name__, MitieEntityExtractor.__name__}, ) assert label == "direction" @@ -396,9 +396,7 @@ async def test_run_evaluation_with_regex_message(mood_agent: Agent, tmp_path: Pa ) -async def test_eval_data( - tmp_path: Path, project: Text, trained_rasa_model: Text, -): +async def test_eval_data(tmp_path: Path, project: Text, trained_rasa_model: Text): config_path = os.path.join(project, "config.yml") data_importer = TrainingDataImporter.load_nlu_importer_from_config( config_path, @@ -1017,7 +1015,7 @@ async def test_nlu_comparison( monkeypatch.setattr( sys.modules["rasa.nlu.test"], "get_eval_data", - AsyncMock(return_value=(1, None, (None,),)), + AsyncMock(return_value=(1, None, (None,))), ) monkeypatch.setattr( sys.modules["rasa.nlu.test"], @@ -1221,7 +1219,7 @@ def __init__(self, prediction_to_return: Dict[Text, Any]) -> None: self.prediction = prediction_to_return async def parse_message( - self, message: UserMessage, only_output_properties: bool = True, + self, message: UserMessage, only_output_properties: bool = True ) -> Dict[Text, Any]: return self.prediction diff --git a/tests/nlu/test_spacy_utils.py b/tests/nlu/test_spacy_utils.py index 5e6157791c76..b6297a008715 100644 --- a/tests/nlu/test_spacy_utils.py +++ b/tests/nlu/test_spacy_utils.py @@ -13,7 +13,7 @@ def create_spacy_nlp_component( - model_name: Text = "en_core_web_md", case_sensitive: Optional[bool] = None, + model_name: Text = "en_core_web_md", case_sensitive: Optional[bool] = None ) -> SpacyNLP: component = SpacyNLP.create( {"model": model_name, "case_sensitive": case_sensitive}, None, None, None diff --git a/tests/nlu/test_train.py b/tests/nlu/test_train.py index ecc544a0354e..851ef9f32ced 100644 --- a/tests/nlu/test_train.py +++ b/tests/nlu/test_train.py @@ -76,7 +76,7 @@ def pipelines_for_tests() -> List[Tuple[Text, List[Dict[Text, Any]]]]: ( "en", as_pipeline( - "WhitespaceTokenizer", "LanguageModelFeaturizer", "DIETClassifier", + "WhitespaceTokenizer", "LanguageModelFeaturizer", "DIETClassifier" ), ), ("fallback", as_pipeline("KeywordIntentClassifier", "FallbackClassifier")), @@ -160,7 +160,7 @@ async def test_train_persist_load_parse( ) persisted_path = rasa.model_training.train_nlu( - str(config_file), nlu_as_json_path, output=str(tmp_path), + str(config_file), nlu_as_json_path, output=str(tmp_path) ) assert Path(persisted_path).is_file() @@ -182,11 +182,11 @@ def test_train_persist_load_parse_non_windows( def test_train_model_empty_pipeline(nlu_as_json_path: Text, tmp_path: Path): config_file = tmp_path / "config.yml" - rasa.shared.utils.io.dump_obj_as_json_to_file(config_file, {"pipeline": [],}) + 
rasa.shared.utils.io.dump_obj_as_json_to_file(config_file, {"pipeline": []}) with pytest.raises(ValueError): rasa.model_training.train_nlu( - str(config_file), nlu_as_json_path, output=str(tmp_path), + str(config_file), nlu_as_json_path, output=str(tmp_path) ) @@ -203,10 +203,10 @@ def test_handles_pipeline_with_non_existing_component( ) with pytest.raises( - Exception, match="Can't load class for name 'my_made_up_component'", + Exception, match="Can't load class for name 'my_made_up_component'" ): rasa.model_training.train_nlu( - str(config_file), nlu_as_json_path, output=str(tmp_path), + str(config_file), nlu_as_json_path, output=str(tmp_path) ) diff --git a/tests/nlu/tokenizers/test_jieba_tokenizer.py b/tests/nlu/tokenizers/test_jieba_tokenizer.py index afd9e631cd48..c0628f901a87 100644 --- a/tests/nlu/tokenizers/test_jieba_tokenizer.py +++ b/tests/nlu/tokenizers/test_jieba_tokenizer.py @@ -20,7 +20,7 @@ def create_jieba(config: Optional[Dict] = None) -> JiebaTokenizer: config = config if config else {} return JiebaTokenizer.create( - {**JiebaTokenizer.get_default_config(), **config}, None, None, None, + {**JiebaTokenizer.get_default_config(), **config}, None, None, None ) diff --git a/tests/nlu/tokenizers/test_tokenizer.py b/tests/nlu/tokenizers/test_tokenizer.py index 473fa42c3d28..e1b72ffc8981 100644 --- a/tests/nlu/tokenizers/test_tokenizer.py +++ b/tests/nlu/tokenizers/test_tokenizer.py @@ -20,7 +20,7 @@ def create_whitespace_tokenizer( config: Optional[Dict[Text, Any]] = None ) -> WhitespaceTokenizer: return WhitespaceTokenizer( - {**WhitespaceTokenizer.get_default_config(), **(config if config else {}),} + {**WhitespaceTokenizer.get_default_config(), **(config if config else {})} ) diff --git a/tests/nlu/tokenizers/test_whitespace_tokenizer.py b/tests/nlu/tokenizers/test_whitespace_tokenizer.py index b9716e0994c8..b01274ad779d 100644 --- a/tests/nlu/tokenizers/test_whitespace_tokenizer.py +++ b/tests/nlu/tokenizers/test_whitespace_tokenizer.py @@ -9,9 +9,9 @@ from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer -def create_whitespace_tokenizer(config: Optional[Dict] = None,) -> WhitespaceTokenizer: +def create_whitespace_tokenizer(config: Optional[Dict] = None) -> WhitespaceTokenizer: config = config if config else {} - return WhitespaceTokenizer({**WhitespaceTokenizer.get_default_config(), **config},) + return WhitespaceTokenizer({**WhitespaceTokenizer.get_default_config(), **config}) @pytest.mark.parametrize( diff --git a/tests/nlu/utils/test_mitie_utils.py b/tests/nlu/utils/test_mitie_utils.py index 5cb24ade65ce..b481d47a3e55 100644 --- a/tests/nlu/utils/test_mitie_utils.py +++ b/tests/nlu/utils/test_mitie_utils.py @@ -54,7 +54,7 @@ def test_provide_different_path( def test_invalid_path( - default_model_storage: ModelStorage, default_execution_context: ExecutionContext, + default_model_storage: ModelStorage, default_execution_context: ExecutionContext ): with pytest.raises(RasaException): MitieNLP.create( diff --git a/tests/shared/core/test_constants.py b/tests/shared/core/test_constants.py index 3396309ebe37..b3c812608cd6 100644 --- a/tests/shared/core/test_constants.py +++ b/tests/shared/core/test_constants.py @@ -4,15 +4,12 @@ from rasa.core.policies.rule_policy import RulePolicy from rasa.nlu.classifiers.fallback_classifier import FallbackClassifier -from rasa.shared.core.constants import ( - CLASSIFIER_NAME_FALLBACK, - POLICY_NAME_RULE, -) +from rasa.shared.core.constants import CLASSIFIER_NAME_FALLBACK, POLICY_NAME_RULE @pytest.mark.parametrize( 
"name_in_constant, policy_class", - [(POLICY_NAME_RULE, RulePolicy), (CLASSIFIER_NAME_FALLBACK, FallbackClassifier),], + [(POLICY_NAME_RULE, RulePolicy), (CLASSIFIER_NAME_FALLBACK, FallbackClassifier)], ) def test_policy_names(name_in_constant: Text, policy_class: Type): assert name_in_constant == policy_class.__name__ @@ -20,7 +17,7 @@ def test_policy_names(name_in_constant: Text, policy_class: Type): @pytest.mark.parametrize( "name_in_constant, classifier_class", - [(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),], + [(CLASSIFIER_NAME_FALLBACK, FallbackClassifier)], ) def test_classifier_names(name_in_constant: Text, classifier_class: Type): assert name_in_constant == classifier_class.__name__ diff --git a/tests/shared/core/test_domain.py b/tests/shared/core/test_domain.py index 377836df25eb..ce1d41ce6116 100644 --- a/tests/shared/core/test_domain.py +++ b/tests/shared/core/test_domain.py @@ -1213,7 +1213,7 @@ def test_featurized_entities_ordered_consistently(): "type": "float", "mappings": [{"type": "from_intent", "value": 5}], } - }, + } }, { KEY_SLOTS: { @@ -1302,20 +1302,20 @@ def test_form_invalid_required_slots_raises(): KEY_SLOTS: { "my_slot": { "type": "text", - "mappings": [{"type": "from_entity", "intent": "greet"},], + "mappings": [{"type": "from_entity", "intent": "greet"}], } } }, { KEY_SLOTS: { - "my_slot": {"type": "text", "mappings": [{"type": "from_intent"}],} + "my_slot": {"type": "text", "mappings": [{"type": "from_intent"}]} } }, { KEY_SLOTS: { "my_slot": { "type": "text", - "mappings": [{"type": "from_intent", "value": None},], + "mappings": [{"type": "from_intent", "value": None}], } } }, @@ -1331,7 +1331,7 @@ def test_form_invalid_required_slots_raises(): KEY_SLOTS: { "my_slot": { "type": "text", - "mappings": [{"type": "from_trigger_intent", "value": None},], + "mappings": [{"type": "from_trigger_intent", "value": None}], } } }, @@ -1566,7 +1566,7 @@ def test_ignored_intents_slot_mappings_invalid_domain(): } ], } - }, + } } with pytest.raises(InvalidDomain): Domain.from_dict(domain_as_dict) diff --git a/tests/shared/core/test_events.py b/tests/shared/core/test_events.py index 060b9b8d38c8..2a20675453fd 100644 --- a/tests/shared/core/test_events.py +++ b/tests/shared/core/test_events.py @@ -181,7 +181,7 @@ def test_json_parse_action_executed_with_no_hide_rule(): "timestamp": None, } deserialised: ActionExecuted = Event.from_parameters(evt) - expected = ActionExecuted("action_listen",) + expected = ActionExecuted("action_listen") assert deserialised == expected assert deserialised.hide_rule_turn == expected.hide_rule_turn @@ -371,19 +371,19 @@ def test_md_format_message_using_short_entity_syntax(): def test_md_format_message_using_short_entity_syntax_no_start_end(): formatted = format_message( - "hello", intent="location", entities=[{"entity": "city", "value": "Berlin"}], + "hello", intent="location", entities=[{"entity": "city", "value": "Berlin"}] ) assert formatted == "hello" def test_md_format_message_no_text(): - formatted = format_message("", intent="location", entities=[],) + formatted = format_message("", intent="location", entities=[]) assert formatted == "" def test_md_format_message_using_short_entity_syntax_no_start_end_or_text(): formatted = format_message( - "", intent="location", entities=[{"entity": "city", "value": "Berlin"}], + "", intent="location", entities=[{"entity": "city", "value": "Berlin"}] ) assert formatted == "" @@ -415,7 +415,7 @@ def test_md_format_message_using_long_entity_syntax_no_start_end(): intent="location", entities=[ {"start": 
10, "end": 16, "entity": "city", "value": "Berlin"}, - {"entity": "country", "value": "Germany", "role": "destination",}, + {"entity": "country", "value": "Germany", "role": "destination"}, ], ) assert formatted == "I am from [Berlin](city)." @@ -556,15 +556,15 @@ def test_split_events( True, ), # providing a single `action_listen` is not a session start - ([ActionExecuted(ACTION_LISTEN_NAME, timestamp=3)], False,), + ([ActionExecuted(ACTION_LISTEN_NAME, timestamp=3)], False), # providing a single `action_session_start` is not a session start - ([ActionExecuted(ACTION_SESSION_START_NAME)], False,), + ([ActionExecuted(ACTION_SESSION_START_NAME)], False), # providing no events is not a session start - ([], False,), + ([], False), ], ) def test_events_begin_with_session_start( - test_events: List[Event], begin_with_session_start: bool, + test_events: List[Event], begin_with_session_start: bool ): assert ( rasa.shared.core.events.do_events_begin_with_session_start(test_events) @@ -637,9 +637,7 @@ def test_print_end_to_end_events(end_to_end_event: Event): ), ], ) -def test_event_executed_comparison( - events: List[Event], comparison_result: bool, -): +def test_event_executed_comparison(events: List[Event], comparison_result: bool): result = all(event == events[0] for event in events) assert result == comparison_result @@ -692,7 +690,7 @@ def test_event_executed_comparison( UserUttered( text="hello", parse_data={ - "intent": {"name": "greet", "confidence": 0.9604260921478271,}, + "intent": {"name": "greet", "confidence": 0.9604260921478271}, "entities": [ {"entity": "city", "value": "London"}, {"entity": "count", "value": 1}, @@ -701,14 +699,14 @@ def test_event_executed_comparison( "message_id": "3f4c04602a4947098c574b107d3ccc50", "metadata": {}, "intent_ranking": [ - {"name": "greet", "confidence": 0.9604260921478271,}, - {"name": "goodbye", "confidence": 0.01835782080888748,}, - {"name": "deny", "confidence": 0.011255578137934208,}, - {"name": "bot_challenge", "confidence": 0.004019865766167641,}, - {"name": "affirm", "confidence": 0.002524246694520116,}, - {"name": "mood_great", "confidence": 0.002214624546468258,}, - {"name": "chitchat", "confidence": 0.0009614597074687481,}, - {"name": "mood_unhappy", "confidence": 0.00024030178610701114,}, + {"name": "greet", "confidence": 0.9604260921478271}, + {"name": "goodbye", "confidence": 0.01835782080888748}, + {"name": "deny", "confidence": 0.011255578137934208}, + {"name": "bot_challenge", "confidence": 0.004019865766167641}, + {"name": "affirm", "confidence": 0.002524246694520116}, + {"name": "mood_great", "confidence": 0.002214624546468258}, + {"name": "chitchat", "confidence": 0.0009614597074687481}, + {"name": "mood_unhappy", "confidence": 0.00024030178610701114}, ], "response_selector": { "all_retrieval_intents": [], @@ -770,9 +768,7 @@ def test_event_subclasses_are_tested(event_class: Type[Event]): assert event_class in subclasses -@pytest.mark.parametrize( - "event", tested_events, -) +@pytest.mark.parametrize("event", tested_events) def test_event_fingerprint_uniqueness(event: Event): f1 = event.fingerprint() event.type_name = "test" diff --git a/tests/shared/core/test_slot_mappings.py b/tests/shared/core/test_slot_mappings.py index e82447fd4c88..6ecd3d4db1cd 100644 --- a/tests/shared/core/test_slot_mappings.py +++ b/tests/shared/core/test_slot_mappings.py @@ -30,7 +30,7 @@ def test_slot_mapping_intent_is_desired(domain: Domain): tracker = DialogueStateTracker("sender_id_test", slots=domain.slots) event1 = UserUttered( text="I'd like to 
book a restaurant for 2 people.", - intent={"name": "request_restaurant", "confidence": 0.9604260921478271,}, + intent={"name": "request_restaurant", "confidence": 0.9604260921478271}, entities=[{"entity": "number", "value": 2}], ) tracker.update(event1, domain) diff --git a/tests/shared/core/test_slots.py b/tests/shared/core/test_slots.py index 98c918e4326a..e6e2f3f51a68 100644 --- a/tests/shared/core/test_slots.py +++ b/tests/shared/core/test_slots.py @@ -267,7 +267,7 @@ def value_feature_pair(self, request: SubRequest) -> Tuple[Any, List[float]]: @pytest.mark.parametrize("value", ["cat", ["cat"]]) def test_apply_single_item_to_slot( - self, value: Any, mappings: List[Dict[Text, Any]], + self, value: Any, mappings: List[Dict[Text, Any]] ): slot = self.create_slot(mappings=mappings, influence_conversation=False) tracker = DialogueStateTracker.from_events("sender", evts=[], slots=[slot]) @@ -374,9 +374,7 @@ def invalid_value(self, request: SubRequest) -> Any: def value_feature_pair(self, request: SubRequest) -> Tuple[Any, List[float]]: return request.param - def test_exception_if_featurized( - self, mappings: List[Dict[Text, Any]], - ): + def test_exception_if_featurized(self, mappings: List[Dict[Text, Any]]): with pytest.raises(InvalidSlotConfigError): AnySlot("⛔️", mappings=mappings, influence_conversation=True) diff --git a/tests/shared/core/test_trackers.py b/tests/shared/core/test_trackers.py index d16521dbefa8..7562bd7e9a7b 100644 --- a/tests/shared/core/test_trackers.py +++ b/tests/shared/core/test_trackers.py @@ -70,11 +70,7 @@ TEST_MOODBOT_DIALOGUE, TEST_DOMAINS_FOR_DIALOGUES, ) -from tests.core.utilities import ( - tracker_from_dialogue, - user_uttered, - get_tracker, -) +from tests.core.utilities import tracker_from_dialogue, user_uttered, get_tracker from rasa.shared.nlu.constants import ( ACTION_NAME, diff --git a/tests/shared/core/training_data/story_reader/test_yaml_story_reader.py b/tests/shared/core/training_data/story_reader/test_yaml_story_reader.py index 7065e70b1aed..44a173cb31cf 100644 --- a/tests/shared/core/training_data/story_reader/test_yaml_story_reader.py +++ b/tests/shared/core/training_data/story_reader/test_yaml_story_reader.py @@ -744,7 +744,7 @@ def test_read_story_file_with_cycles(domain: Domain): def test_generate_training_data_with_cycles(domain: Domain): featurizer = MaxHistoryTrackerFeaturizer(SingleStateFeaturizer(), max_history=4) training_trackers = training.load_data( - "data/test_yaml_stories/stories_with_cycle.yml", domain, augmentation_factor=0, + "data/test_yaml_stories/stories_with_cycle.yml", domain, augmentation_factor=0 ) _, label_ids, _ = featurizer.featurize_trackers( @@ -1088,7 +1088,7 @@ async def test_unpack_regex_message_has_correct_entity_start_and_end(): slot_1 = {entity: "Core"} text = f"/greet{json.dumps(slot_1)}" - message = Message(data={TEXT: text},) + message = Message(data={TEXT: text}) domain = Domain( intents=["greet"], diff --git a/tests/shared/core/training_data/test_visualization.py b/tests/shared/core/training_data/test_visualization.py index 569398494ad5..3bb4f4f4ad27 100644 --- a/tests/shared/core/training_data/test_visualization.py +++ b/tests/shared/core/training_data/test_visualization.py @@ -183,7 +183,7 @@ def test_story_visualization_with_merging(domain: Domain): "data/test_yaml_stories/stories.yml", domain ) generated_graph = visualization.visualize_stories( - story_steps, domain, output_file=None, max_history=3, should_merge_nodes=True, + story_steps, domain, output_file=None, max_history=3, 
         should_merge_nodes=True
     )
     assert 15 < len(generated_graph.nodes()) < 33
diff --git a/tests/shared/importers/test_multi_project.py b/tests/shared/importers/test_multi_project.py
index c61a0847f456..08ebaf5be04a 100644
--- a/tests/shared/importers/test_multi_project.py
+++ b/tests/shared/importers/test_multi_project.py
@@ -81,7 +81,7 @@ def test_load_from_none(input_dict: Dict, tmp_path: Path):
     assert actual._imports == list()
 
 
-def test_load_if_subproject_is_more_specific_than_parent(tmp_path: Path,):
+def test_load_if_subproject_is_more_specific_than_parent(tmp_path: Path):
     config_path = str(tmp_path / "config.yml")
     utils.dump_obj_as_yaml_to_file(tmp_path / "config.yml", {})
 
@@ -209,7 +209,7 @@ def test_not_importing_not_relevant_additional_files(tmp_path: Path):
     assert not selector.is_imported(str(not_relevant_file2))
 
 
-def test_not_importing_e2e_conversation_tests_in_project(tmp_path: Path,):
+def test_not_importing_e2e_conversation_tests_in_project(tmp_path: Path):
     config = {"imports": ["bots/Bot A"]}
     config_path = str(tmp_path / "config.yml")
     utils.dump_obj_as_yaml_to_file(config_path, config)
diff --git a/tests/shared/nlu/training_data/test_message.py b/tests/shared/nlu/training_data/test_message.py
index 589334e1c4e1..d1010fd6dffe 100644
--- a/tests/shared/nlu/training_data/test_message.py
+++ b/tests/shared/nlu/training_data/test_message.py
@@ -371,9 +371,7 @@ def test_features_present(
         (Message({TEXT: "text"}), True),
     ],
 )
-def test_is_core_or_domain_message(
-    message: Message, result: bool,
-):
+def test_is_core_or_domain_message(message: Message, result: bool):
     assert result == message.is_core_or_domain_message()
 
 
@@ -395,7 +393,7 @@ def test_message_fingerprint_includes_data_and_features(
     assert fp1 != fp2
 
     message.add_features(
-        Features(scipy.sparse.csr_matrix([1, 1, 0]), FEATURE_TYPE_SEQUENCE, TEXT, "c2",)
+        Features(scipy.sparse.csr_matrix([1, 1, 0]), FEATURE_TYPE_SEQUENCE, TEXT, "c2")
     )
     fp3 = message.fingerprint()
diff --git a/tests/shared/nlu/training_data/test_training_data.py b/tests/shared/nlu/training_data/test_training_data.py
index c60f95cb2251..b38bd94a2249 100644
--- a/tests/shared/nlu/training_data/test_training_data.py
+++ b/tests/shared/nlu/training_data/test_training_data.py
@@ -204,12 +204,7 @@ def test_composite_entities_data():
     assert len(td.intent_examples) == 29
     assert len(td.training_examples) == 29
     assert td.entity_synonyms == {"SF": "San Fransisco"}
-    assert td.intents == {
-        "order_pizza",
-        "book_flight",
-        "chitchat",
-        "affirm",
-    }
+    assert td.intents == {"order_pizza", "book_flight", "chitchat", "affirm"}
     assert td.entities == {"location", "topping", "size"}
     assert td.entity_groups == {"1", "2"}
     assert td.entity_roles == {"to", "from"}
@@ -442,10 +437,7 @@ def test_validate_number_of_examples_per_intent():
     )
     message_non_nlu_intent = Message(data={"intent": "subscribe"})
 
-    training_examples = [
-        message_intent,
-        message_non_nlu_intent,
-    ]
+    training_examples = [message_intent, message_non_nlu_intent]
     training_data = TrainingData(training_examples=training_examples)
 
     with pytest.warns(Warning) as w:
@@ -875,9 +867,7 @@ def test_fingerprint_is_different_when_lookup_table_has_changed(
 ):
     from rasa.shared.importers.utils import training_data_from_paths
 
-    files = [
-        "data/test/lookup_tables/lookup_table.json",
-    ]
+    files = ["data/test/lookup_tables/lookup_table.json"]
     td1 = training_data_from_paths(files, language="en")
     fingerprint1 = td1.fingerprint()
diff --git a/tests/shared/test_data.py b/tests/shared/test_data.py
index 2eae62d37a39..805eb68c56c3 100644
--- a/tests/shared/test_data.py
+++ b/tests/shared/test_data.py
@@ -119,7 +119,7 @@ def test_get_core_nlu_files(project):
                 "data/examples/dialogflow/package.json",
             ],
         ),
-        ("luis", ["data/examples/luis/demo-restaurants_v7.json",],),
+        ("luis", ["data/examples/luis/demo-restaurants_v7.json"]),
         (
             "rasa",
             [
diff --git a/tests/shared/utils/schemas/test_events.py b/tests/shared/utils/schemas/test_events.py
index a865ace4b310..25ee9858bfb3 100644
--- a/tests/shared/utils/schemas/test_events.py
+++ b/tests/shared/utils/schemas/test_events.py
@@ -28,7 +28,7 @@
                 "id": 5103161883140543062,
                 "name": "greet",
                 "confidence": 0.9999818801879883,
-            },
+            }
         ],
         "response_selector": {
             "all_retrieval_intents": ["chitchat"],
@@ -47,8 +47,8 @@
             "chitchat": {
                 "response": {
                     "id": -2223917631873543698,
-                    "responses": [{"text": "I am called Retrieval Bot!"},],
-                    "response_templates": [{"text": "I am called Retrieval Bot!"},],
+                    "responses": [{"text": "I am called Retrieval Bot!"}],
+                    "response_templates": [{"text": "I am called Retrieval Bot!"}],
                     "confidence": 0.9660752415657043,
                     "intent_response_key": "chitchat/ask_name",
                     "utter_action": "utter_chitchat/ask_name",
@@ -59,7 +59,7 @@
                         "id": -2223917631873543698,
                         "confidence": 0.9660752415657043,
                         "intent_response_key": "chitchat/ask_name",
-                    },
+                    }
                 ],
             },
         },
diff --git a/tests/test_memory_leak.py b/tests/test_memory_leak.py
index f734ddfee0c2..8fed17d7e577 100644
--- a/tests/test_memory_leak.py
+++ b/tests/test_memory_leak.py
@@ -57,9 +57,7 @@ def function_to_profile(self) -> None:
         raise NotImplementedError
 
     @pytest.mark.timeout(720, func_only=True)
-    def test_for_memory_leak(
-        self, name_for_dumped_files: Text, tmp_path: Path,
-    ) -> None:
+    def test_for_memory_leak(self, name_for_dumped_files: Text, tmp_path: Path) -> None:
         # Run as separate process to avoid other things affecting the memory usage.
         # Unfortunately `memory-profiler` doesn't work properly with
         # `multiprocessing.Process` as it can't handle the process exit
@@ -83,10 +81,7 @@ def test_for_memory_leak(
             time.sleep(0.01)
 
         results = memory_profiler.memory_usage(
-            process,
-            interval=PROFILING_INTERVAL,
-            include_children=True,
-            timestamps=True,
+            process, interval=PROFILING_INTERVAL, include_children=True, timestamps=True
         )
 
         # `memory-profiler` sometimes adds `None` values at the end which we don't need
diff --git a/tests/test_model_training.py b/tests/test_model_training.py
index 5856481ab33c..915011604cb2 100644
--- a/tests/test_model_training.py
+++ b/tests/test_model_training.py
@@ -82,7 +82,7 @@ def test_train_temp_files(
     # a new model because nothing has been changed. It also shouldn't create
     # any temp files.
     rasa.train(
-        domain_path, stack_config_path, [stories_path, nlu_data_path], output=output,
+        domain_path, stack_config_path, [stories_path, nlu_data_path], output=output
     )
 
     assert count_temp_rasa_files(tempfile.tempdir) == 0
@@ -101,7 +101,7 @@ def test_train_core_temp_files(
     monkeypatch.setattr(tempfile, "tempdir", tmp_path / "training")
 
     rasa.model_training.train_core(
-        domain_path, stack_config_path, stories_path, output=str(tmp_path / "models"),
+        domain_path, stack_config_path, stories_path, output=str(tmp_path / "models")
     )
 
     assert count_temp_rasa_files(tempfile.tempdir) == 0
@@ -244,7 +244,7 @@ def test_train_nlu_autoconfig(
     monkeypatch.setattr(GraphTrainer, GraphTrainer.train.__name__, Mock())
     # do training
     rasa.model_training.train_nlu(
-        stack_config_path, nlu_data_path, output="test_train_nlu_temp_files_models",
+        stack_config_path, nlu_data_path, output="test_train_nlu_temp_files_models"
     )
 
     mocked_get_configuration.assert_called_once()
@@ -480,7 +480,7 @@ def test_training_core_with_e2e_fails_gracefully(
     e2e_stories_path: Text,
 ):
     rasa.model_training.train_core(
-        domain_path, stack_config_path, e2e_stories_path, output=str(tmp_path),
+        domain_path, stack_config_path, e2e_stories_path, output=str(tmp_path)
     )
 
     assert not list(tmp_path.glob("*"))
@@ -601,7 +601,7 @@ def test_model_finetuning_core_with_default_epochs(
 
 
 def test_model_finetuning_core_new_domain_label(
-    tmp_path: Path, monkeypatch: MonkeyPatch, trained_moodbot_path: Text,
+    tmp_path: Path, monkeypatch: MonkeyPatch, trained_moodbot_path: Text
 ):
     (tmp_path / "models").mkdir()
     output = str(tmp_path / "models")
@@ -623,7 +623,7 @@ def test_model_finetuning_core_new_domain_label(
 
 
 def test_model_finetuning_new_domain_label_stops_all_training(
-    tmp_path: Path, trained_moodbot_path: Text,
+    tmp_path: Path, trained_moodbot_path: Text
 ):
     (tmp_path / "models").mkdir()
     output = str(tmp_path / "models")
@@ -689,9 +689,7 @@ def test_model_finetuning_nlu(
     assert metadata.training_type == autoconfig.TrainingType.NLU
 
 
-def test_model_finetuning_nlu_new_label(
-    tmp_path: Path, trained_nlu_moodbot_path: Text,
-):
+def test_model_finetuning_nlu_new_label(tmp_path: Path, trained_nlu_moodbot_path: Text):
     (tmp_path / "models").mkdir()
     output = str(tmp_path / "models")
@@ -711,7 +709,7 @@ def test_model_finetuning_nlu_new_label(
 
 
 def test_model_finetuning_nlu_new_entity(
-    tmp_path: Path, trained_nlu_moodbot_path: Text,
+    tmp_path: Path, trained_nlu_moodbot_path: Text
 ):
     (tmp_path / "models").mkdir()
     output = str(tmp_path / "models")
@@ -758,7 +756,7 @@ def test_model_finetuning_nlu_new_label_already_in_domain(
 
 
 def test_model_finetuning_nlu_new_label_to_domain_only(
-    tmp_path: Path, trained_nlu_moodbot_path: Text,
+    tmp_path: Path, trained_nlu_moodbot_path: Text
 ):
     (tmp_path / "models").mkdir()
     output = str(tmp_path / "models")
@@ -955,7 +953,7 @@ def test_models_not_retrained_if_only_new_action(
 
 
 def test_invalid_graph_schema(
-    tmp_path: Path, domain_path: Text, stories_path: Text, nlu_data_path: Text,
+    tmp_path: Path, domain_path: Text, stories_path: Text, nlu_data_path: Text
 ):
     config = textwrap.dedent(
         """
@@ -1015,9 +1013,7 @@ def test_fingerprint_changes_if_module_changes(
     )
 
     # Train to initialize cache
-    rasa.train(
-        domain_path, str(new_config_path), [stories_path], output=str(tmp_path),
-    )
+    rasa.train(domain_path, str(new_config_path), [stories_path], output=str(tmp_path))
 
     # Make sure that the caching works as expected the code didn't change
     result = rasa.train(
diff --git a/tests/test_server.py b/tests/test_server.py
index a46723a27806..1934b6e38c3f 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -235,7 +235,7 @@ def run_server(monkeypatch: MonkeyPatch) -> NoReturn:
         import sys
 
         monkeypatch.setattr(
-            sys.modules["rasa.model_training"], "train", mocked_training_function,
+            sys.modules["rasa.model_training"], "train", mocked_training_function
         )
 
         from rasa import __main__
@@ -439,7 +439,7 @@ async def test_parse_without_nlu_model(rasa_app_core: SanicASGITestClient):
     assert all(prop in rjs for prop in ["entities", "intent", "text"])
 
 
-async def test_parse_on_invalid_emulation_mode(rasa_app: SanicASGITestClient,):
+async def test_parse_on_invalid_emulation_mode(rasa_app: SanicASGITestClient):
     _, response = await rasa_app.post(
         "/model/parse?emulation_mode=ANYTHING", json={"text": "hello"}
     )
@@ -557,7 +557,7 @@ async def test_train_with_retrieval_events_success(
 
 
 def assert_trained_model(
-    response_body: bytes, tmp_path_factory: TempPathFactory,
+    response_body: bytes, tmp_path_factory: TempPathFactory
 ) -> None:
     # save model to temporary file
 
@@ -573,7 +573,7 @@ def assert_trained_model(
 
 
 async def test_train_with_yaml(
-    rasa_app: SanicASGITestClient, tmp_path_factory: TempPathFactory,
+    rasa_app: SanicASGITestClient, tmp_path_factory: TempPathFactory
 ):
     training_data = """
 version: "3.0"
@@ -791,7 +791,7 @@ async def test_evaluate_stories_end_to_end(
     }
 
 
-async def test_add_message(rasa_app: SanicASGITestClient,):
+async def test_add_message(rasa_app: SanicASGITestClient):
     conversation_id = "test_add_message_test_id"
 
@@ -858,7 +858,7 @@ async def test_evaluate_invalid_intent_model_file(rasa_app: SanicASGITestClient)
 
 async def test_evaluate_intent_without_body(rasa_app: SanicASGITestClient):
     _, response = await rasa_app.post(
-        "/model/test/intents", headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
+        "/model/test/intents", headers={"Content-type": rasa.server.YAML_CONTENT_TYPE}
     )
 
     assert response.status == HTTPStatus.BAD_REQUEST
@@ -1180,12 +1180,12 @@ async def test_predict_invalid_entities_format(rasa_app: SanicASGITestClient):
 async def test_predict_empty_request_body(rasa_app: SanicASGITestClient):
     _, response = await rasa_app.post(
-        "/model/predict", headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
+        "/model/predict", headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE}
     )
 
     assert response.status == HTTPStatus.BAD_REQUEST
 
 
-async def test_append_events_empty_request_body(rasa_app: SanicASGITestClient,):
+async def test_append_events_empty_request_body(rasa_app: SanicASGITestClient):
     _, response = await rasa_app.post(
         "/conversations/testid/tracker/events",
         headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
diff --git a/tests/test_telemetry.py b/tests/test_telemetry.py
index 35b66de28fa9..742e8691229b 100644
--- a/tests/test_telemetry.py
+++ b/tests/test_telemetry.py
@@ -409,7 +409,7 @@ def _create_exception_event_in_file(filename: Text) -> Dict[Text, Any]:
                     ],
                     "context_line": "    sys.exit(load_entry_point('rasa', 'console_scripts', 'rasa')())",
                     "post_context": [],
-                },
+                }
             ]
         },
     }
diff --git a/tests/test_validator.py b/tests/test_validator.py
index 95a42d6beec8..0f0b9674164d 100644
--- a/tests/test_validator.py
+++ b/tests/test_validator.py
@@ -52,7 +52,7 @@ def test_verify_nlu_with_e2e_story(tmp_path: Path, nlu_data_path: Path):
 
 def test_verify_intents_does_not_fail_on_valid_data(nlu_data_path: Text):
     importer = RasaFileImporter(
-        domain_path="data/test_moodbot/domain.yml", training_data_paths=[nlu_data_path],
+        domain_path="data/test_moodbot/domain.yml", training_data_paths=[nlu_data_path]
     )
     validator = Validator.from_importer(importer)
     assert validator.verify_intents()
@@ -61,8 +61,7 @@ def test_verify_intents_does_not_fail_on_valid_data(nlu_data_path: Text):
 def test_verify_intents_does_fail_on_invalid_data(nlu_data_path: Text):
     # domain and nlu data are from different domain and should produce warnings
     importer = RasaFileImporter(
-        domain_path="data/test_domains/default.yml",
-        training_data_paths=[nlu_data_path],
+        domain_path="data/test_domains/default.yml", training_data_paths=[nlu_data_path]
     )
     validator = Validator.from_importer(importer)
     assert not validator.verify_intents()
@@ -94,7 +93,7 @@ def test_verify_valid_responses_in_rules(nlu_data_path: Text):
 
 def test_verify_story_structure(stories_path: Text):
     importer = RasaFileImporter(
-        domain_path="data/test_domains/default.yml", training_data_paths=[stories_path],
+        domain_path="data/test_domains/default.yml", training_data_paths=[stories_path]
     )
     validator = Validator.from_importer(importer)
     assert validator.verify_story_structure(ignore_warnings=False)
@@ -221,7 +220,7 @@ def test_verify_there_is_example_repetition_in_intents(nlu_data_path: Text):
     # moodbot nlu data already has duplicated example 'good afternoon'
     # for intents greet and goodbye
     importer = RasaFileImporter(
-        domain_path="data/test_moodbot/domain.yml", training_data_paths=[nlu_data_path],
+        domain_path="data/test_moodbot/domain.yml", training_data_paths=[nlu_data_path]
     )
     validator = Validator.from_importer(importer)
     assert not validator.verify_example_repetition_in_intents(False)
@@ -269,7 +268,7 @@ def test_verify_logging_message_for_repetition_in_intents(caplog, nlu_data_path:
     # moodbot nlu data already has duplicated example 'good afternoon'
     # for intents greet and goodbye
     importer = RasaFileImporter(
-        domain_path="data/test_moodbot/domain.yml", training_data_paths=[nlu_data_path],
+        domain_path="data/test_moodbot/domain.yml", training_data_paths=[nlu_data_path]
     )
     validator = Validator.from_importer(importer)
     caplog.clear()  # clear caplog to avoid counting earlier debug messages
@@ -325,7 +324,7 @@ def test_verify_actions_in_stories_not_in_domain(tmp_path: Path, domain_path: Te
     )
 
     importer = RasaFileImporter(
-        domain_path=domain_path, training_data_paths=[story_file_name],
+        domain_path=domain_path, training_data_paths=[story_file_name]
     )
     validator = Validator.from_importer(importer)
     with pytest.warns(UserWarning) as warning:
@@ -351,7 +350,7 @@ def test_verify_actions_in_rules_not_in_domain(tmp_path: Path, domain_path: Text
         """
     )
     importer = RasaFileImporter(
-        domain_path=domain_path, training_data_paths=[rules_file_name],
+        domain_path=domain_path, training_data_paths=[rules_file_name]
     )
     validator = Validator.from_importer(importer)
     with pytest.warns(UserWarning) as warning:
@@ -446,7 +445,7 @@ def test_valid_stories_rules_actions_in_domain(
       - action: action_greet
         """
     )
-    importer = RasaFileImporter(domain_path=domain, training_data_paths=[file_name],)
+    importer = RasaFileImporter(domain_path=domain, training_data_paths=[file_name])
     validator = Validator.from_importer(importer)
     assert validator.verify_actions_in_stories_rules()
@@ -476,7 +475,7 @@ def test_valid_stories_rules_default_actions(
      - action: action_restart
        """
    )
-    importer = RasaFileImporter(domain_path=domain, training_data_paths=[file_name],)
+    importer = RasaFileImporter(domain_path=domain, training_data_paths=[file_name])
     validator = Validator.from_importer(importer)
     assert validator.verify_actions_in_stories_rules()
diff --git a/tests/train.py b/tests/train.py
index ea22293a0eed..1d499e9963cd 100644
--- a/tests/train.py
+++ b/tests/train.py
@@ -62,7 +62,7 @@ def pipelines_for_tests() -> List[Tuple[Text, List[Dict[Text, Any]]]]:
         (
             "en",
             as_pipeline(
-                "WhitespaceTokenizer", "LanguageModelFeaturizer", "DIETClassifier",
+                "WhitespaceTokenizer", "LanguageModelFeaturizer", "DIETClassifier"
             ),
         ),
         ("fallback", as_pipeline("KeywordIntentClassifier", "FallbackClassifier")),
diff --git a/tests/utils/tensorflow/test_callback.py b/tests/utils/tensorflow/test_callback.py
index 433385bf2b49..c326b2502e2b 100644
--- a/tests/utils/tensorflow/test_callback.py
+++ b/tests/utils/tensorflow/test_callback.py
@@ -59,7 +59,7 @@ def test_does_model_improve(
 
 @pytest.fixture(scope="module")
 def trained_ted(
-    tmp_path_factory: TempPathFactory, moodbot_domain_path: Path,
+    tmp_path_factory: TempPathFactory, moodbot_domain_path: Path
 ) -> TEDPolicy:
     training_files = "data/test_moodbot/data/stories.yml"
     domain = Domain.load(moodbot_domain_path)
diff --git a/tests/utils/tensorflow/test_layers.py b/tests/utils/tensorflow/test_layers.py
index 15bc2a885da3..bc98fdd000c3 100644
--- a/tests/utils/tensorflow/test_layers.py
+++ b/tests/utils/tensorflow/test_layers.py
@@ -10,12 +10,7 @@
     RandomlyConnectedDense,
     DenseForSparse,
 )
-from rasa.utils.tensorflow.constants import (
-    INNER,
-    SOFTMAX,
-    LABEL,
-    LABEL_PAD_ID,
-)
+from rasa.utils.tensorflow.constants import INNER, SOFTMAX, LABEL, LABEL_PAD_ID
 import rasa.utils.tensorflow.layers_utils as layers_utils
 from rasa.shared.nlu.constants import (
     TEXT,
@@ -29,7 +24,7 @@
 
 
 def test_dot_product_loss_inner_sim():
-    layer = DotProductLoss(0, similarity_type=INNER,)
+    layer = DotProductLoss(0, similarity_type=INNER)
     a = tf.constant([[[1.0, 0.0, 2.0]], [[1.0, 0.0, 2.0]]])
     b = tf.constant([[[1.0, 0.0, -2.0]], [[1.0, 0.0, -2.0]]])
     mask = tf.constant([[1.0, 0.0]])
@@ -243,7 +238,7 @@ def mock_random_indices(*args, **kwargs) -> tf.Tensor:
         pos_neg_labels.numpy()
         == np.array(
             [
-                [1, 0,],  # l0 is an actual positive example in `batch_labels_embed[0]`,
+                [1, 0],  # l0 is an actual positive example in `batch_labels_embed[0]`,
                 # whereas l2 is not
                 [
                     0,
@@ -330,13 +325,13 @@ def mock_random_indices(*args, **kwargs) -> tf.Tensor:
         pos_neg_labels.numpy()
         == np.array(
             [
-                [1, 0,],  # l0 is an actual positive example in `batch_labels_embed[0]`,
+                [1, 0],  # l0 is an actual positive example in `batch_labels_embed[0]`,
                 # whereas l2 is not
                 [
                     0,
                     0,
                 ],  # Neither l0 nor l1 are positive examples in `batch_labels_embed[1]`
-                [1, 0,],  # l3 is an actual positive example in `batch_labels_embed[2]`,
+                [1, 0],  # l3 is an actual positive example in `batch_labels_embed[2]`,
                 # whereas l1 is not
             ]
         )
@@ -363,7 +358,7 @@ def test_multi_label_dot_product_loss__loss_sigmoid_is_ln2_when_all_similarities
     "model_confidence, mock_similarities, expected_confidences",
     [
         # Confidence is always `1.0` since only one option exists and we use softmax
-        (SOFTMAX, [[[-3.0], [0.0]]], [[[1.0], [1.0]]]),
+        (SOFTMAX, [[[-3.0], [0.0]]], [[[1.0], [1.0]]])
     ],
 )
 def test_dot_product_loss_get_similarities_and_confidences_from_embeddings(
@@ -459,7 +454,7 @@ def test_randomly_connected_dense_all_inputs_connected():
 def test_dense_for_sparse_get_feature_type(
     layer_name: Text, expected_feature_type: Union[Text, None]
 ):
-    layer = DenseForSparse(name=layer_name, units=10,)
+    layer = DenseForSparse(name=layer_name, units=10)
     assert layer.get_feature_type() == expected_feature_type
 
 
@@ -489,5 +484,5 @@
 def test_dense_for_sparse_get_attribute(
     layer_name: Text, expected_attribute: Union[Text, None]
 ):
-    layer = DenseForSparse(name=layer_name, units=10,)
+    layer = DenseForSparse(name=layer_name, units=10)
     assert layer.get_attribute() == expected_attribute
diff --git a/tests/utils/tensorflow/test_model_data_utils.py b/tests/utils/tensorflow/test_model_data_utils.py
index 58e264e75d72..6536829be5cd 100644
--- a/tests/utils/tensorflow/test_model_data_utils.py
+++ b/tests/utils/tensorflow/test_model_data_utils.py
@@ -184,13 +184,7 @@ def test_extract_features():
 @pytest.mark.parametrize(
     "text, intent, entities, attributes, real_sparse_feature_sizes",
     [
-        (
-            "Hello!",
-            "greet",
-            None,
-            [TEXT],
-            {"text": {"sequence": [1], "sentence": [1]}},
-        ),
+        ("Hello!", "greet", None, [TEXT], {"text": {"sequence": [1], "sentence": [1]}}),
         (
             "Hello!",
             "greet",
@@ -258,7 +252,7 @@ def test_convert_training_examples(
         )
     ]
     output, sparse_feature_sizes = model_data_utils.featurize_training_examples(
-        [message], attributes=attributes, entity_tag_specs=entity_tag_spec,
+        [message], attributes=attributes, entity_tag_specs=entity_tag_spec
     )
 
     assert len(output) == 1
diff --git a/tests/utils/tensorflow/test_models.py b/tests/utils/tensorflow/test_models.py
index 33ef135d013c..caa131a4e07e 100644
--- a/tests/utils/tensorflow/test_models.py
+++ b/tests/utils/tensorflow/test_models.py
@@ -6,16 +6,8 @@
 from rasa.utils.tensorflow.models import RasaModel, TransformerRasaModel
 from rasa.utils.tensorflow.model_data import RasaModelData
 from rasa.utils.tensorflow.model_data import FeatureArray
-from rasa.utils.tensorflow.constants import (
-    LABEL,
-    IDS,
-    SENTENCE,
-)
-from rasa.shared.nlu.constants import (
-    TEXT,
-    FEATURE_TYPE_SENTENCE,
-    FEATURE_TYPE_SEQUENCE,
-)
+from rasa.utils.tensorflow.constants import LABEL, IDS, SENTENCE
+from rasa.shared.nlu.constants import TEXT, FEATURE_TYPE_SENTENCE, FEATURE_TYPE_SEQUENCE
 
 
 @pytest.mark.parametrize(
@@ -69,7 +61,7 @@ def test_equal_dicts(
 
 @pytest.mark.parametrize(
     "batch_size, number_of_data_points, expected_number_of_batch_iterations",
-    [(2, 3, 2), (1, 3, 3), (5, 3, 1),],
+    [(2, 3, 2), (1, 3, 3), (5, 3, 1)],
 )
 def test_batch_inference(
     batch_size: int,
@@ -102,9 +94,8 @@ def _batch_predict(
             TEXT: {
                 SENTENCE: [
                     FeatureArray(
-                        np.random.rand(number_of_data_points, 2),
-                        number_of_dimensions=2,
-                    ),
+                        np.random.rand(number_of_data_points, 2), number_of_dimensions=2
+                    )
                 ]
             }
         },
@@ -204,7 +195,7 @@ def test_raise_exception_decreased_sparse_feature_sizes(
     raise_exception: bool,
 ):
     """Tests if exception is raised when sparse feature sizes decrease
-        during incremental training."""
+    during incremental training."""
     if raise_exception:
         with pytest.raises(Exception) as exec_info:
             TransformerRasaModel._check_if_sparse_feature_sizes_decreased(
diff --git a/tests/utils/tensorflow/test_rasa_layers.py b/tests/utils/tensorflow/test_rasa_layers.py
index 6b999322a5a2..4055b9dd6e1f 100644
--- a/tests/utils/tensorflow/test_rasa_layers.py
+++ b/tests/utils/tensorflow/test_rasa_layers.py
@@ -4,11 +4,7 @@
 
 from typing import Text, Union, Any, Dict, List, Type
 
-from rasa.shared.nlu.constants import (
-    TEXT,
-    FEATURE_TYPE_SENTENCE,
-    FEATURE_TYPE_SEQUENCE,
-)
+from rasa.shared.nlu.constants import TEXT, FEATURE_TYPE_SENTENCE, FEATURE_TYPE_SEQUENCE
 from rasa.utils.tensorflow import layers
 from rasa.utils.tensorflow.rasa_layers import (
     ConcatenateSparseDenseFeatures,
@@ -88,7 +84,7 @@
     },
 )
 
-model_config_transformer_mlm = dict(model_config_transformer, **{MASKED_LM: True},)
+model_config_transformer_mlm = dict(model_config_transformer, **{MASKED_LM: True})
 
 
 # Dummy feature signatures and features (full of 1s) for tests that don't check exact
@@ -195,7 +191,7 @@
             "attribute_signature": {
                 "sequence": [],
                 "sentence": [feature_signature_dense_1],
-            },
+            }
         },
         units_1,
     ),
@@ -207,7 +203,7 @@
             "attribute_signature": {
                 "sequence": [feature_signature_dense_1],
                 "sentence": [],
-            },
+            }
         },
         units_1,
     ),
@@ -326,7 +322,7 @@ def test_layer_gives_correct_output_units(
                 SENTENCE: [feature_signature_dense_1],
             }
         },
-        ([feature_dense_seq_1], [feature_dense_sent_1], sequence_lengths,),
+        ([feature_dense_seq_1], [feature_dense_sent_1], sequence_lengths),
        [
             [batch_size, max_seq_length + 1, units_1],
             [batch_size, max_seq_length + 1, 1],
@@ -341,7 +337,7 @@ def test_layer_gives_correct_output_units(
             "attribute_signature": {
                 "sequence": [],
                 "sentence": [feature_signature_dense_1],
-            },
+            }
         },
         ([], [feature_dense_sent_1], sequence_lengths_empty),
         [[batch_size, 1, units_1], [batch_size, 1, 1]],
@@ -355,7 +351,7 @@ def test_layer_gives_correct_output_units(
             "attribute_signature": {
                 "sequence": [feature_signature_dense_1],
                 "sentence": [],
-            },
+            }
         },
         ([feature_dense_seq_1], [], sequence_lengths),
         [[batch_size, max_seq_length, units_1], [batch_size, max_seq_length, 1]],
@@ -385,8 +381,8 @@ def test_layer_gives_correct_output_units(
             [batch_size, max_seq_length + 1, units_transformer],
             [batch_size, max_seq_length + 1, units_hidden_layer],
             [batch_size, max_seq_length + 1, 1],
-            [0,],
-            [0,],
+            [0],
+            [0],
             [
                 batch_size,
                 num_transformer_layers,
@@ -406,9 +402,9 @@ def test_layer_gives_correct_output_units(
             [batch_size, max_seq_length + 1, units_hidden_layer],
             [batch_size, max_seq_length + 1, units_hidden_layer],
             [batch_size, max_seq_length + 1, 1],
-            [0,],
-            [0,],
-            [0,],
+            [0],
+            [0],
+            [0],
         ],
         "same_as_train",
     ),
@@ -422,9 +418,9 @@ def test_layer_gives_correct_output_units(
             [batch_size, max_seq_length + 1, units_concat],
             [batch_size, max_seq_length + 1, units_concat],
             [batch_size, max_seq_length + 1, 1],
-            [0,],
-            [0,],
-            [0,],
+            [0],
+            [0],
+            [0],
         ],
         "same_as_train",
     ),
@@ -439,7 +435,7 @@ def test_layer_gives_correct_output_units(
                 SENTENCE: [],
             }
         },
-        ([feature_sparse_seq_1], [], sequence_lengths,),
+        ([feature_sparse_seq_1], [], sequence_lengths),
         [
             [batch_size, max_seq_length, units_transformer],
             [batch_size, max_seq_length, units_hidden_layer],
@@ -458,8 +454,8 @@ def test_layer_gives_correct_output_units(
             [batch_size, max_seq_length, units_transformer],
             [batch_size, max_seq_length, units_hidden_layer],
             [batch_size, max_seq_length, 1],
-            [0,],
-            [0,],
+            [0],
+            [0],
             [
                 batch_size,
                 num_transformer_layers,
@@ -479,7 +475,7 @@ def test_correct_output_shape(
     expected_output_shapes_train: List[List[int]],
     expected_output_shapes_test: Union[Text, List[List[int]]],
 ) -> None:
-    layer = layer_class(**layer_args, attribute=attribute_name, config=model_config,)
+    layer = layer_class(**layer_args, attribute=attribute_name, config=model_config)
     train_outputs = layer(layer_inputs, training=True)
 
     if not isinstance(train_outputs, tuple):
@@ -547,7 +543,7 @@ def test_concat_sparse_dense_raises_exception_when_inconsistent_sparse_features(
     is_sparse=False, units=1, number_of_dimensions=3
 )
 realistic_feature_dense_seq_1 = tf.convert_to_tensor(
-    [[[10.0], [20.0], [30.0]], [[40.0], [50.0], [0.0]],], dtype=tf.float32
+    [[[10.0], [20.0], [30.0]], [[40.0], [50.0], [0.0]]], dtype=tf.float32
 )
 
 realistic_feature_signature_dense_2 = FeatureSignature(
@@ -562,7 +558,7 @@ def test_concat_sparse_dense_raises_exception_when_inconsistent_sparse_features(
     is_sparse=False, units=3, number_of_dimensions=3
 )
 realistic_feature_dense_sent_3 = tf.convert_to_tensor(
-    [[[0.1, 0.2, 0.3]], [[0.4, 0.5, 0.6]]], dtype=tf.float32,
+    [[[0.1, 0.2, 0.3]], [[0.4, 0.5, 0.6]]], dtype=tf.float32
 )
 
 realistic_sequence_lengths = tf.convert_to_tensor([3, 2], dtype=tf.int32)
@@ -750,7 +746,7 @@ def test_feature_combining_correct_output(
                 realistic_feature_signature_dense_1,
                 realistic_feature_signature_dense_2,
             ],
-            SENTENCE: [realistic_feature_signature_dense_3,],
+            SENTENCE: [realistic_feature_signature_dense_3],
         },
         (
             [realistic_feature_dense_seq_1, realistic_feature_dense_seq_2],
@@ -798,8 +794,8 @@ def test_feature_combining_correct_output(
         (
             np.array(
                 [
-                    [[10.0, 1.0, 2.0], [20.0, 3.0, 4.0], [30.0, 5.0, 6.0],],
-                    [[40.0, 1.5, 2.5], [50.0, 3.5, 4.5], [0.0, 0.0, 0.0],],
+                    [[10.0, 1.0, 2.0], [20.0, 3.0, 4.0], [30.0, 5.0, 6.0]],
+                    [[40.0, 1.5, 2.5], [50.0, 3.5, 4.5], [0.0, 0.0, 0.0]],
                 ],
                 dtype=np.float32,
             ),
@@ -827,7 +823,7 @@ def test_sequence_layer_correct_output(
         mask_seq_sent_expected,
         token_ids_expected,
     ) = expected_outputs_train
-    (_, seq_sent_features, mask_seq_sent, token_ids, mlm_boolean_mask, _,) = layer(
+    (_, seq_sent_features, mask_seq_sent, token_ids, mlm_boolean_mask, _) = layer(
         inputs, training=True
     )
     assert (seq_sent_features.numpy() == seq_sent_features_expected).all()
@@ -842,7 +838,7 @@ def test_sequence_layer_correct_output(
     assert not mlm_boolean_mask.numpy()[0][realistic_sequence_lengths.numpy()][0]
 
     # Test-time check
-    (seq_sent_features_expected, mask_seq_sent_expected, _,) = expected_outputs_train
+    (seq_sent_features_expected, mask_seq_sent_expected, _) = expected_outputs_train
     (
         transformer_outputs,
         seq_sent_features,
diff --git a/tests/utils/test_common.py b/tests/utils/test_common.py
index e7f5d17d169c..f784ffa99d7c 100644
--- a/tests/utils/test_common.py
+++ b/tests/utils/test_common.py
@@ -131,7 +131,7 @@ def test_find_unavailable_packages():
     [
         (Path, "pathlib.Path"),
         (Agent, "rasa.core.agent.Agent"),
-        (DIETClassifier, "rasa.nlu.classifiers.diet_classifier.DIETClassifier",),
+        (DIETClassifier, "rasa.nlu.classifiers.diet_classifier.DIETClassifier"),
     ],
 )
 def test_module_path_from_class(clazz: Type, module_path: Text):
diff --git a/tests/utils/test_endpoints.py b/tests/utils/test_endpoints.py
index c945e78b1bf3..847c940622b9 100644
--- a/tests/utils/test_endpoints.py
+++ b/tests/utils/test_endpoints.py
@@ -99,14 +99,12 @@ async def test_endpoint_config_with_cafile(tmp_path: Path):
 
     with aioresponses() as mocked:
         endpoint = endpoint_utils.EndpointConfig(
-            "https://example.com/", cafile=str(cafile),
+            "https://example.com/", cafile=str(cafile)
         )
 
-        mocked.post(
-            "https://example.com/", status=200,
-        )
+        mocked.post("https://example.com/", status=200)
 
-        await endpoint.request("post",)
+        await endpoint.request("post")
 
         request = latest_request(mocked, "post", "https://example.com/")[-1]
 
@@ -118,12 +116,10 @@ async def test_endpoint_config_with_non_existent_cafile(tmp_path: Path):
     cafile = "data/test_endpoints/no_file.pem"
 
-    endpoint = endpoint_utils.EndpointConfig(
-        "https://example.com/", cafile=str(cafile),
-    )
+    endpoint = endpoint_utils.EndpointConfig("https://example.com/", cafile=str(cafile))
 
     with pytest.raises(FileNotFoundException):
-        await endpoint.request("post",)
+        await endpoint.request("post")
 
 
 def test_endpoint_config_default_token_name():
@@ -160,7 +156,7 @@ async def test_request_non_json_response():
 
 @pytest.mark.parametrize(
     "filename, endpoint_type",
-    [("data/test_endpoints/example_endpoints.yml", "tracker_store"),],
+    [("data/test_endpoints/example_endpoints.yml", "tracker_store")],
 )
 def test_read_endpoint_config(filename: Text, endpoint_type: Text):
     conf = endpoint_utils.read_endpoint_config(filename, endpoint_type)
diff --git a/tests/utils/test_io.py b/tests/utils/test_io.py
index a0b3ff172b8a..ac788373a422 100644
--- a/tests/utils/test_io.py
+++ b/tests/utils/test_io.py
@@ -104,7 +104,7 @@ def test_directories_are_equal(tmp_path_factory: TempPathFactory):
     assert rasa.utils.io.are_directories_equal(dir1, dir2)
 
 
-def test_directories_are_equal_sub_dir(tmp_path_factory: TempPathFactory,):
+def test_directories_are_equal_sub_dir(tmp_path_factory: TempPathFactory):
     dir1 = tmp_path_factory.mktemp("dir1")
     (dir1 / "dir").mkdir()
     (dir1 / "dir" / "file.txt").write_text("Hello!")
@@ -128,7 +128,7 @@ def test_directories_are_equal_different_file_content(
     assert not rasa.utils.io.are_directories_equal(dir1, dir2)
 
 
-def test_directories_are_equal_extra_file(tmp_path_factory: TempPathFactory,):
+def test_directories_are_equal_extra_file(tmp_path_factory: TempPathFactory):
     dir1 = tmp_path_factory.mktemp("dir1")
     (dir1 / "file.txt").write_text("Hello!")
 
diff --git a/tests/utils/test_plotting.py b/tests/utils/test_plotting.py
index 87b14e0e12f1..a0428b78500e 100644
--- a/tests/utils/test_plotting.py
+++ b/tests/utils/test_plotting.py
@@ -19,19 +19,13 @@ def test_paired_histogram_specification_bins(
 ):
     """Bin list should run from the lowest data value to the highest + bin_width"""
     for density in [False, True]:
-        bins, _, _, _, = rasa.utils.plotting._extract_paired_histogram_specification(
-            data,
-            num_bins=num_bins,
-            density=density,
-            x_pad_fraction=0,
-            y_pad_fraction=0,
+        bins, _, _, _ = rasa.utils.plotting._extract_paired_histogram_specification(
+            data, num_bins=num_bins, density=density, x_pad_fraction=0, y_pad_fraction=0
         )
         assert np.all(bins == expected_bins)
 
 
-@pytest.mark.parametrize(
-    "bad_data", [([[]]), ([[], []]),],
-)
+@pytest.mark.parametrize("bad_data", [([[]]), ([[], []])])
 def test_paired_histogram_specification_bins_raises(bad_data: List):
     """`_extract_paired_histogram_specification` raises a ValueError on empty data"""
     for density in [False, True]:
@@ -45,9 +39,7 @@ def test_paired_histogram_specification_bins_raises(bad_data: List):
             )
 
 
-@pytest.mark.parametrize(
-    "bad_data", [([[]]), ([[], []]),],
-)
+@pytest.mark.parametrize("bad_data", [([[]]), ([[], []])])
 def test_plot_paired_histogram_warns_on_bad_data(bad_data: List):
     """Empty data shouldn't raise an error."""
     for density in [False, True]:
@@ -93,8 +85,8 @@ def test_paired_histogram_specification_histograms(
     density: bool,
     expected_histograms: List[List[float]],
 ):
-    _, histograms, _, _, = rasa.utils.plotting._extract_paired_histogram_specification(
-        data, num_bins=num_bins, density=density, x_pad_fraction=0, y_pad_fraction=0,
+    _, histograms, _, _ = rasa.utils.plotting._extract_paired_histogram_specification(
+        data, num_bins=num_bins, density=density, x_pad_fraction=0, y_pad_fraction=0
     )
     assert np.all(histograms[0] == expected_histograms[0])
     assert np.all(histograms[1] == expected_histograms[1])
diff --git a/tests/utils/test_train_utils.py b/tests/utils/test_train_utils.py
index 6527060b30a4..809d8659fe59 100644
--- a/tests/utils/test_train_utils.py
+++ b/tests/utils/test_train_utils.py
@@ -114,7 +114,7 @@ def test_rank_and_mask(
     ],
 )
 def test_init_split_entities_config(
-    split_entities_config: Any, expected_initialized_config: Dict[(str, bool)],
+    split_entities_config: Any, expected_initialized_config: Dict[(str, bool)]
 ):
     assert (
         train_utils.init_split_entities(
@@ -241,9 +241,7 @@ def test_update_confidence_type(
         ({}, False),
     ],
 )
-def test_tolerance_setting(
-    component_config: Dict[Text, float], raises_exception: bool,
-):
+def test_tolerance_setting(component_config: Dict[Text, float], raises_exception: bool):
     if raises_exception:
         with pytest.raises(InvalidConfigException):
             train_utils._check_tolerance_setting(component_config)