From 05ad68a10b8cc5814fe69614aee8d8eb6e709f52 Mon Sep 17 00:00:00 2001 From: Uri Granta Date: Sat, 22 Jun 2024 18:58:09 +0100 Subject: [PATCH 01/20] Upgrade deps and regenerate constraints --- common_build/format/constraints.txt | 19 +- common_build/taskipy/constraints.txt | 54 +++-- common_build/types/constraints.txt | 6 +- docs/constraints.txt | 127 ++++++----- docs/notebooks/constraints.txt | 316 ++++++++++++++------------- setup.py | 4 +- tests/latest/constraints.txt | 95 ++++---- tests/old/constraints.txt | 117 +++++----- 8 files changed, 375 insertions(+), 363 deletions(-) diff --git a/common_build/format/constraints.txt b/common_build/format/constraints.txt index 06b4d36f0b..1e1fc8a9dd 100644 --- a/common_build/format/constraints.txt +++ b/common_build/format/constraints.txt @@ -1,12 +1,13 @@ -black==23.3.0 -click==8.1.3 -flake8==6.0.0 -isort==5.12.0 +black==24.4.2 +click==8.1.7 +flake8==7.1.0 +isort==5.13.2 mccabe==0.7.0 mypy-extensions==1.0.0 -packaging==23.1 -pathspec==0.11.1 -platformdirs==3.5.3 -pycodestyle==2.10.0 -pyflakes==3.0.1 +packaging==24.1 +pathspec==0.12.1 +platformdirs==4.2.2 +pycodestyle==2.12.0 +pyflakes==3.2.0 tomli==2.0.1 +typing_extensions==4.12.2 diff --git a/common_build/taskipy/constraints.txt b/common_build/taskipy/constraints.txt index 12ff59b243..e11b1efa17 100644 --- a/common_build/taskipy/constraints.txt +++ b/common_build/taskipy/constraints.txt @@ -1,37 +1,35 @@ -astroid==2.15.5 -attrs==23.1.0 -black==23.3.0 -click==8.1.3 +astroid==3.2.2 +attrs==23.2.0 +black==24.4.2 +click==8.1.7 colorama==0.4.6 -coverage==7.2.7 -dill==0.3.6 -exceptiongroup==1.1.1 -filelock==3.12.2 -flake8==6.0.0 +coverage==7.5.3 +dill==0.3.8 +exceptiongroup==1.2.1 +filelock==3.15.4 +flake8==7.1.0 iniconfig==2.0.0 -isort==5.12.0 -lazy-object-proxy==1.9.0 +isort==5.13.2 mccabe==0.7.0 -mypy==1.3.0 +mypy==1.10.0 mypy-extensions==1.0.0 -packaging==23.1 -pathspec==0.11.1 -platformdirs==3.5.3 -pluggy==1.0.0 -psutil==5.9.5 -pycodestyle==2.10.0 -pyflakes==3.0.1 -pylint==2.17.4 -pytest==7.3.2 +packaging==24.1 +pathspec==0.12.1 +platformdirs==4.2.2 +pluggy==1.5.0 +psutil==5.9.8 +pycodestyle==2.12.0 +pyflakes==3.2.0 +pylint==3.2.3 +pytest==8.2.2 pytest-black==0.3.12 -pytest-cov==4.1.0 +pytest-cov==5.0.0 pytest-flake8==1.1.1 -pytest-isort==3.1.0 +pytest-isort==4.0.0 pytest-mypy==0.10.3 -pytest-pylint==0.19.0 -taskipy==1.11.0 +pytest-pylint==0.21.0 +taskipy==1.13.0 toml==0.10.2 tomli==2.0.1 -tomlkit==0.11.8 -typing_extensions==4.6.3 -wrapt==1.14.1 +tomlkit==0.12.5 +typing_extensions==4.12.2 diff --git a/common_build/types/constraints.txt b/common_build/types/constraints.txt index d5caeca8ca..e6d7d7744e 100644 --- a/common_build/types/constraints.txt +++ b/common_build/types/constraints.txt @@ -1,5 +1,5 @@ -mypy==1.3.0 +mypy==1.10.0 mypy-extensions==1.0.0 tomli==2.0.1 -types-PyYAML==6.0.12.10 -typing_extensions==4.6.3 +types-PyYAML==6.0.12.20240311 +typing_extensions==4.12.2 diff --git a/docs/constraints.txt b/docs/constraints.txt index 8b37326e5f..7dff006858 100644 --- a/docs/constraints.txt +++ b/docs/constraints.txt @@ -1,74 +1,73 @@ -accessible-pygments==0.0.4 -alabaster==0.7.13 -anyascii==0.3.2 -astroid==2.15.5 -asttokens==2.2.1 -attrs==23.1.0 -Babel==2.12.1 -backcall==0.2.0 -beautifulsoup4==4.12.2 -bleach==6.0.0 -certifi==2023.5.7 -charset-normalizer==3.1.0 +accessible-pygments==0.0.5 +alabaster==0.7.16 +astroid==3.2.2 +asttokens==2.4.1 +attrs==23.2.0 +Babel==2.15.0 +beautifulsoup4==4.12.3 +bleach==6.1.0 +certifi==2024.6.2 +charset-normalizer==3.3.2 decorator==5.1.1 defusedxml==0.7.1 
-docutils==0.20.1 -executing==1.2.0 -fastjsonschema==2.17.1 -idna==3.4 +docutils==0.21.2 +exceptiongroup==1.2.1 +executing==2.0.1 +fastjsonschema==2.20.0 +idna==3.7 imagesize==1.4.1 -ipython==8.14.0 -jedi==0.18.2 -Jinja2==3.1.2 -jsonschema==4.17.3 -jupyter_client==8.2.0 -jupyter_core==5.3.0 -jupyterlab-pygments==0.2.2 -latexcodec==2.0.1 -lazy-object-proxy==1.9.0 -MarkupSafe==2.1.3 -matplotlib-inline==0.1.6 -mistune==2.0.5 -nbclient==0.8.0 -nbconvert==7.4.0 -nbformat==5.9.0 -nbsphinx==0.9.2 -packaging==23.1 -pandocfilters==1.5.0 -parso==0.8.3 -pexpect==4.8.0 -pickleshare==0.7.5 -platformdirs==3.5.3 -prompt-toolkit==3.0.38 +ipython==8.25.0 +jedi==0.19.1 +Jinja2==3.1.4 +jsonschema==4.22.0 +jsonschema-specifications==2023.12.1 +jupyter_client==8.6.2 +jupyter_core==5.7.2 +jupyterlab_pygments==0.3.0 +latexcodec==3.0.0 +MarkupSafe==2.1.5 +matplotlib-inline==0.1.7 +mistune==3.0.2 +nbclient==0.10.0 +nbconvert==7.16.4 +nbformat==5.10.4 +nbsphinx==0.9.4 +packaging==24.1 +pandocfilters==1.5.1 +parso==0.8.4 +pexpect==4.9.0 +platformdirs==4.2.2 +prompt_toolkit==3.0.47 ptyprocess==0.7.0 pure-eval==0.2.2 pybtex==0.24.0 -pybtex-docutils==1.0.2 -pydata-sphinx-theme==0.13.3 -Pygments==2.15.1 -pyrsistent==0.19.3 -python-dateutil==2.8.2 -PyYAML==6.0 -pyzmq==25.1.0 -requests==2.31.0 +pybtex-docutils==1.0.3 +pydata-sphinx-theme==0.15.3 +Pygments==2.18.0 +python-dateutil==2.9.0.post0 +PyYAML==6.0.1 +pyzmq==26.0.3 +referencing==0.35.1 +requests==2.32.3 +rpds-py==0.18.1 six==1.16.0 snowballstemmer==2.2.0 -soupsieve==2.4.1 -Sphinx==7.0.1 -sphinx-autoapi==2.1.1 -sphinxcontrib-applehelp==1.0.4 -sphinxcontrib-bibtex==2.5.0 -sphinxcontrib-devhelp==1.0.2 -sphinxcontrib-htmlhelp==2.0.1 +soupsieve==2.5 +Sphinx==7.3.7 +sphinx-autoapi==3.1.2 +sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-bibtex==2.6.2 +sphinxcontrib-devhelp==1.0.6 +sphinxcontrib-htmlhelp==2.0.5 sphinxcontrib-jsmath==1.0.1 -sphinxcontrib-qthelp==1.0.3 -sphinxcontrib-serializinghtml==1.1.5 -stack-data==0.6.2 -tinycss2==1.2.1 -tornado==6.3.2 -traitlets==5.9.0 -typing_extensions==4.6.3 -urllib3==2.0.3 -wcwidth==0.2.6 +sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-serializinghtml==1.1.10 +stack-data==0.6.3 +tinycss2==1.3.0 +tomli==2.0.1 +tornado==6.4.1 +traitlets==5.14.3 +typing_extensions==4.12.2 +urllib3==2.2.2 +wcwidth==0.2.13 webencodings==0.5.1 -wrapt==1.14.1 diff --git a/docs/notebooks/constraints.txt b/docs/notebooks/constraints.txt index 724356c348..1eb8d9c7a8 100644 --- a/docs/notebooks/constraints.txt +++ b/docs/notebooks/constraints.txt @@ -1,189 +1,197 @@ about-time==4.2.1 -absl-py==1.4.0 -aiohttp==3.8.4 +absl-py==2.1.0 +aiohttp==3.9.5 aiohttp-cors==0.7.0 aiosignal==1.3.1 -alive-progress==3.1.4 -anyio==3.7.0 -argon2-cffi==21.3.0 +alive-progress==3.1.5 +annotated-types==0.7.0 +anyio==4.4.0 +argon2-cffi==23.1.0 argon2-cffi-bindings==21.2.0 -arrow==1.2.3 -asttokens==2.2.1 +arrow==1.3.0 +asttokens==2.4.1 astunparse==1.6.3 -async-lru==2.0.2 -async-timeout==4.0.2 -attrs==23.1.0 -autograd==1.5 -Babel==2.12.1 -backcall==0.2.0 -beautifulsoup4==4.12.2 -bleach==6.0.0 -blessed==1.20.0 +async-lru==2.0.4 +async-timeout==4.0.3 +attrs==23.2.0 +autograd==1.6.2 +Babel==2.15.0 +beautifulsoup4==4.12.3 +bleach==6.1.0 Box2D==2.3.2 Box2D-kengz==2.3.3 box2d-py==2.3.5 -cachetools==5.3.1 -certifi==2023.5.7 -cffi==1.15.1 -charset-normalizer==3.1.0 -check-shapes==1.0.0 -click==8.1.3 -cloudpickle==2.2.1 +cachetools==5.3.3 +certifi==2024.6.2 +cffi==1.16.0 +charset-normalizer==3.3.2 +check-shapes==1.1.1 +clarabel==0.9.0 +click==8.1.7 +cloudpickle==3.0.0 cma==3.2.2 -colorful==0.5.5 
-comm==0.1.3 -contourpy==1.0.7 -cvxpy==1.3.1 -cycler==0.11.0 -debugpy==1.6.7 +colorful==0.5.6 +comm==0.2.2 +contourpy==1.2.1 +cvxpy==1.5.2 +cycler==0.12.1 +debugpy==1.8.1 decorator==5.1.1 defusedxml==0.7.1 Deprecated==1.2.14 dill==0.3.5.1 -distlib==0.3.6 +distlib==0.3.8 dm-tree==0.1.8 -ecos==2.0.12 -exceptiongroup==1.1.1 -executing==1.2.0 -fastjsonschema==2.17.1 -filelock==3.12.2 -flatbuffers==23.5.26 -fonttools==4.40.0 +dropstackframe==0.1.0 +ecos==2.0.14 +exceptiongroup==1.2.1 +executing==2.0.1 +fastjsonschema==2.20.0 +filelock==3.15.4 +flatbuffers==24.3.25 +fonttools==4.53.0 fqdn==1.5.1 -frozenlist==1.3.3 -future==0.18.3 -gast==0.4.0 -google-api-core==2.11.0 -google-auth==2.19.1 -google-auth-oauthlib==1.0.0 +frozenlist==1.4.1 +future==1.0.0 +gast==0.5.4 +google-api-core==2.19.0 +google-auth==2.30.0 google-pasta==0.2.0 -googleapis-common-protos==1.59.1 -gpflow==2.8.1 -gpflux==0.4.2 -gpustat==1.1 +googleapis-common-protos==1.63.1 +gpflow==2.9.2 +gpflux==0.4.4 grapheme==0.6.0 -greenlet==2.0.2 -grpcio==1.51.3 +greenlet==3.0.3 +grpcio==1.64.1 gym==0.26.2 gym-notices==0.0.8 -h5py==3.8.0 -idna==3.4 -imageio==2.31.1 -ipykernel==6.23.2 -ipython==8.14.0 +h11==0.14.0 +h5py==3.11.0 +httpcore==1.0.5 +httpx==0.27.0 +idna==3.7 +imageio==2.34.1 +ipykernel==6.29.4 +ipython==8.25.0 isoduration==20.11.0 -jax==0.4.12 -jedi==0.18.2 -Jinja2==3.1.2 -json5==0.9.14 -jsonpointer==2.3 -jsonschema==4.17.3 -jupyter-events==0.6.3 -jupyter-lsp==2.2.0 -jupyter_client==8.2.0 -jupyter_core==5.3.0 -jupyter_server==2.6.0 -jupyter_server_terminals==0.4.4 -jupyterlab==4.0.2 -jupyterlab-pygments==0.2.2 -jupyterlab_server==2.22.1 -jupytext==1.14.6 -keras==2.12.0 -kiwisolver==1.4.4 -lark==1.1.5 -libclang==16.0.0 -Markdown==3.4.3 -markdown-it-py==2.2.0 -MarkupSafe==2.1.3 -matplotlib==3.7.1 -matplotlib-inline==0.1.6 -mdit-py-plugins==0.4.0 +jedi==0.19.1 +Jinja2==3.1.4 +json5==0.9.25 +jsonpointer==3.0.0 +jsonschema==4.22.0 +jsonschema-specifications==2023.12.1 +jupyter-events==0.10.0 +jupyter-lsp==2.2.5 +jupyter_client==8.6.2 +jupyter_core==5.7.2 +jupyter_server==2.14.1 +jupyter_server_terminals==0.5.3 +jupyterlab==4.2.2 +jupyterlab_pygments==0.3.0 +jupyterlab_server==2.27.2 +jupytext==1.16.2 +keras==3.3.3 +kiwisolver==1.4.5 +lark==1.1.9 +libclang==18.1.1 +linkify-it-py==2.0.3 +Markdown==3.6 +markdown-it-py==3.0.0 +MarkupSafe==2.1.5 +matplotlib==3.9.0 +matplotlib-inline==0.1.7 +mdit-py-plugins==0.4.1 mdurl==0.1.2 -mistune==2.0.5 -ml-dtypes==0.2.0 -msgpack==1.0.5 -multidict==6.0.4 -multipledispatch==0.6.0 -nbclient==0.8.0 -nbconvert==7.4.0 -nbformat==5.9.0 -nest-asyncio==1.5.6 -notebook_shim==0.2.3 -numpy==1.23.5 -nvidia-ml-py==11.525.112 -oauthlib==3.2.2 -opencensus==0.11.2 +memray==1.13.0 +mistune==3.0.2 +ml-dtypes==0.3.2 +msgpack==1.0.8 +multidict==6.0.5 +multipledispatch==1.0.0 +namex==0.0.8 +nbclient==0.10.0 +nbconvert==7.16.4 +nbformat==5.10.4 +nest-asyncio==1.6.0 +notebook_shim==0.2.4 +numpy==1.26.4 +opencensus==0.11.4 opencensus-context==0.1.3 opt-einsum==3.3.0 -osqp==0.6.3 -overrides==7.3.1 -packaging==23.1 -pandocfilters==1.5.0 -parso==0.8.3 -pexpect==4.8.0 -pickleshare==0.7.5 -Pillow==9.5.0 -platformdirs==3.5.3 -plotly==5.15.0 -prometheus-client==0.17.0 -prompt-toolkit==3.0.38 -protobuf==4.23.2 -psutil==5.9.5 +optree==0.11.0 +osqp==0.6.7 +overrides==7.7.0 +packaging==24.1 +pandocfilters==1.5.1 +parso==0.8.4 +pexpect==4.9.0 +pillow==10.3.0 +platformdirs==4.2.2 +plotly==5.22.0 +prometheus_client==0.20.0 +prompt_toolkit==3.0.47 +proto-plus==1.24.0 +protobuf==4.25.3 +psutil==6.0.0 ptyprocess==0.7.0 pure-eval==0.2.2 
py-spy==0.3.14 -pyasn1==0.5.0 -pyasn1-modules==0.3.0 -pycparser==2.21 -pydantic==1.10.9 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pycparser==2.22 +pydantic==2.7.4 +pydantic_core==2.18.4 pygame==2.1.0 -Pygments==2.15.1 -pymoo==0.6.0.1 -pyparsing==3.0.9 -pyrsistent==0.19.3 -python-dateutil==2.8.2 +Pygments==2.18.0 +pymoo==0.6.1.1 +pyparsing==3.1.2 +python-dateutil==2.9.0.post0 python-json-logger==2.0.7 -PyYAML==6.0 -pyzmq==25.1.0 -qdldl==0.1.7 -ray==2.5.0 -requests==2.31.0 -requests-oauthlib==1.3.1 +PyYAML==6.0.1 +pyzmq==26.0.3 +qdldl==0.1.7.post3 +ray==2.30.0 +referencing==0.35.1 +requests==2.32.3 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 +rich==13.7.1 +rpds-py==0.18.1 rsa==4.9 -scipy==1.10.1 -scs==3.2.3 -Send2Trash==1.8.2 +scipy==1.13.1 +scs==3.2.4.post3 +Send2Trash==1.8.3 six==1.16.0 -smart-open==6.3.0 -sniffio==1.3.0 -soupsieve==2.4.1 -stack-data==0.6.2 -swig==4.1.1 +smart-open==7.0.4 +sniffio==1.3.1 +soupsieve==2.5 +stack-data==0.6.3 +swig==4.2.1 tabulate==0.9.0 -tenacity==8.2.2 -tensorboard==2.12.3 -tensorboard-data-server==0.7.0 -tensorflow==2.12.0 -tensorflow-estimator==2.12.0 -tensorflow-io-gcs-filesystem==0.32.0 -tensorflow-probability==0.19.0 -termcolor==2.3.0 -terminado==0.17.1 -tinycss2==1.2.1 -toml==0.10.2 +tenacity==8.4.1 +tensorboard==2.16.2 +tensorboard-data-server==0.7.2 +tensorflow==2.16.1 +tensorflow-io-gcs-filesystem==0.37.0 +tensorflow-probability==0.24.0 +termcolor==2.4.0 +terminado==0.18.1 +textual==0.70.0 +tf_keras==2.16.0 +tinycss2==1.3.0 tomli==2.0.1 -tornado==6.3.2 -traitlets==5.9.0 -typing_extensions==4.6.3 -uri-template==1.2.0 -urllib3==1.26.16 -virtualenv==20.21.0 -wcwidth==0.2.6 -webcolors==1.13 +tornado==6.4.1 +traitlets==5.14.3 +types-python-dateutil==2.9.0.20240316 +typing_extensions==4.12.2 +uc-micro-py==1.0.3 +uri-template==1.3.0 +urllib3==2.2.2 +virtualenv==20.26.3 +wcwidth==0.2.13 +webcolors==24.6.0 webencodings==0.5.1 -websocket-client==1.5.3 -Werkzeug==2.3.6 -wrapt==1.14.1 -yarl==1.9.2 +websocket-client==1.8.0 +Werkzeug==3.0.3 +wrapt==1.16.0 +yarl==1.9.4 diff --git a/setup.py b/setup.py index 00ac659b6c..9098c09e56 100644 --- a/setup.py +++ b/setup.py @@ -40,8 +40,8 @@ install_requires=[ "absl-py", "dill<0.3.6", - "gpflow>=2.8.1", - "gpflux>=0.4.2", + "gpflow>=2.9.2", + "gpflux>=0.4.4", "numpy", "tensorflow>=2.5; platform_system!='Darwin' or platform_machine!='arm64'", "tensorflow-macos>=2.5; platform_system=='Darwin' and platform_machine=='arm64'", diff --git a/tests/latest/constraints.txt b/tests/latest/constraints.txt index be6b0fa4b3..3c0ccc0ed4 100644 --- a/tests/latest/constraints.txt +++ b/tests/latest/constraints.txt @@ -3,80 +3,77 @@ absl-py==2.1.0 alive-progress==3.1.5 astunparse==1.6.3 autograd==1.6.2 -cachetools==5.3.2 -certifi==2024.2.2 +certifi==2024.6.2 charset-normalizer==3.3.2 check-shapes==1.1.1 -clarabel==0.6.0 +clarabel==0.9.0 cloudpickle==3.0.0 cma==3.2.2 -contourpy==1.2.0 -cvxpy==1.4.2 +contourpy==1.2.1 +cvxpy==1.5.2 cycler==0.12.1 decorator==5.1.1 Deprecated==1.2.14 dill==0.3.5.1 dm-tree==0.1.8 dropstackframe==0.1.0 -ecos==2.0.13 -exceptiongroup==1.2.0 -flatbuffers==23.5.26 -fonttools==4.48.1 -future==0.18.3 +ecos==2.0.14 +exceptiongroup==1.2.1 +flatbuffers==24.3.25 +fonttools==4.53.0 +future==1.0.0 gast==0.5.4 -google-auth==2.27.0 -google-auth-oauthlib==1.2.0 google-pasta==0.2.0 -gpflow==2.9.1 -gpflux==0.4.3 +gpflow==2.9.2 +gpflux==0.4.4 grapheme==0.6.0 greenlet==3.0.3 -grpcio==1.60.1 -h5py==3.10.0 -idna==3.6 +grpcio==1.64.1 +h5py==3.11.0 +idna==3.7 iniconfig==2.0.0 -keras==2.15.0 +keras==3.3.3 kiwisolver==1.4.5 
lark==1.1.9 -libclang==16.0.6 -Markdown==3.5.2 +libclang==18.1.1 +Markdown==3.6 +markdown-it-py==3.0.0 MarkupSafe==2.1.5 -matplotlib==3.8.2 -ml-dtypes==0.2.0 +matplotlib==3.9.0 +mdurl==0.1.2 +ml-dtypes==0.3.2 multipledispatch==1.0.0 +namex==0.0.8 numpy==1.26.4 -oauthlib==3.2.2 opt-einsum==3.3.0 -osqp==0.6.5 -packaging==23.2 -pillow==10.2.0 -pluggy==1.4.0 -protobuf==4.25.2 -pyasn1==0.5.1 -pyasn1-modules==0.3.0 -pybind11==2.11.1 +optree==0.11.0 +osqp==0.6.7 +packaging==24.1 +pillow==10.3.0 +pluggy==1.5.0 +protobuf==4.25.3 +Pygments==2.18.0 pymoo==0.6.1.1 -pyparsing==3.1.1 -pytest==8.0.0 -python-dateutil==2.8.2 +pyparsing==3.1.2 +pytest==8.2.2 +python-dateutil==2.9.0.post0 PyYAML==6.0.1 -qdldl==0.1.7.post0 -requests==2.31.0 -requests-oauthlib==1.3.1 -rsa==4.9 -scipy==1.11.4 -scs==3.2.4.post1 +qdldl==0.1.7.post3 +requests==2.32.3 +rich==13.7.1 +scipy==1.13.1 +scs==3.2.4.post3 six==1.16.0 tabulate==0.9.0 -tensorboard==2.15.2 +tensorboard==2.16.2 tensorboard-data-server==0.7.2 -tensorflow==2.15.0 -tensorflow-estimator==2.15.0 -tensorflow-io-gcs-filesystem==0.36.0 -tensorflow-probability==0.23.0 +tensorflow==2.16.1 +tensorflow-io-gcs-filesystem==0.37.0 +tensorflow-probability==0.24.0 termcolor==2.4.0 +tf_keras==2.16.0 tomli==2.0.1 -typing_extensions==4.9.0 -urllib3==2.2.0 -Werkzeug==3.0.1 -wrapt==1.14.1 +typing_extensions==4.12.2 +urllib3==2.2.2 +Werkzeug==3.0.3 +wrapt==1.16.0 diff --git a/tests/old/constraints.txt b/tests/old/constraints.txt index 326b5154fa..6071de7119 100644 --- a/tests/old/constraints.txt +++ b/tests/old/constraints.txt @@ -1,68 +1,77 @@ about-time==4.2.1 -absl-py==1.4.0 -alive-progress==3.1.4 +absl-py==2.1.0 +alive-progress==3.1.5 astunparse==1.6.3 -autograd==1.5 -cachetools==5.3.1 -certifi==2023.5.7 -charset-normalizer==3.1.0 -check-shapes==1.0.0 -cloudpickle==2.2.1 +autograd==1.6.2 +cachetools==5.3.3 +certifi==2024.6.2 +charset-normalizer==3.3.2 +check-shapes==1.1.1 +clarabel==0.9.0 +cloudpickle==3.0.0 cma==3.2.2 -cvxpy==1.3.1 -cycler==0.11.0 +contourpy==1.2.1 +cvxpy==1.5.2 +cycler==0.12.1 decorator==5.1.1 Deprecated==1.2.14 dill==0.3.5.1 dm-tree==0.1.8 -ecos==2.0.12 -exceptiongroup==1.1.1 -flatbuffers==23.5.26 -fonttools==4.38.0 -future==0.18.3 -gast==0.4.0 -google-auth==2.19.1 +dropstackframe==0.1.0 +ecos==2.0.14 +exceptiongroup==1.2.1 +flatbuffers==24.3.25 +fonttools==4.53.0 +future==1.0.0 +gast==0.5.4 +google-auth==2.30.0 google-auth-oauthlib==0.4.6 google-pasta==0.2.0 -gpflow==2.8.1 -gpflux==0.4.2 +gpflow==2.9.2 +gpflux==0.4.4 grapheme==0.6.0 -greenlet==2.0.2 -grpcio==1.54.2 -h5py==3.8.0 -idna==3.4 -importlib-metadata==6.6.0 +greenlet==3.0.3 +grpcio==1.64.1 +h5py==3.11.0 +idna==3.7 iniconfig==2.0.0 keras==2.8.0 Keras-Preprocessing==1.1.2 -kiwisolver==1.4.4 -lark==1.1.5 -libclang==16.0.0 -Markdown==3.4.3 -MarkupSafe==2.1.3 -matplotlib==3.5.3 -multipledispatch==0.6.0 +kiwisolver==1.4.5 +lark==1.1.9 +libclang==18.1.1 +Markdown==3.6 +markdown-it-py==3.0.0 +MarkupSafe==2.1.5 +matplotlib==3.9.0 +mdurl==0.1.2 +ml-dtypes==0.3.2 +multipledispatch==1.0.0 +namex==0.0.8 numpy==1.21.6 oauthlib==3.2.2 opt-einsum==3.3.0 -osqp==0.6.3 -packaging==23.1 -Pillow==9.5.0 -pluggy==1.0.0 +optree==0.11.0 +osqp==0.6.7 +packaging==24.1 +pillow==10.3.0 +pluggy==1.5.0 protobuf==3.19.6 -pyasn1==0.5.0 -pyasn1-modules==0.3.0 -pymoo==0.6.0.1 -pyparsing==3.0.9 -pytest==7.3.2 -python-dateutil==2.8.2 -PyYAML==6.0 -qdldl==0.1.7 -requests==2.31.0 -requests-oauthlib==1.3.1 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +Pygments==2.18.0 +pymoo==0.6.1.1 +pyparsing==3.1.2 +pytest==8.2.2 +python-dateutil==2.9.0.post0 
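These pinned files are ordinary pip constraints, consumed with `pip install -c <file>`. As a minimal illustrative sketch (the helper and the path below are examples, not part of the patch), an environment can be compared against a regenerated pin set like so:

    from importlib.metadata import PackageNotFoundError, version

    def check_constraints(path: str) -> None:
        # Report installed packages whose versions differ from their `name==pin` entries.
        for line in open(path):
            entry = line.strip()
            if not entry or entry.startswith("#"):
                continue
            name, _, pinned = entry.partition("==")
            try:
                installed = version(name)
            except PackageNotFoundError:
                print(f"{name}: not installed")
                continue
            if installed != pinned:
                print(f"{name}: installed {installed}, pinned {pinned}")

    check_constraints("tests/old/constraints.txt")
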
+PyYAML==6.0.1 +qdldl==0.1.7.post3 +requests==2.32.3 +requests-oauthlib==2.0.0 +rich==13.7.1 rsa==4.9 -scipy==1.7.3 -scs==3.2.3 +scipy==1.13.1 +scs==3.2.4.post3 six==1.16.0 tabulate==0.9.0 tensorboard==2.8.0 @@ -70,12 +79,12 @@ tensorboard-data-server==0.6.1 tensorboard-plugin-wit==1.8.1 tensorflow==2.8.4 tensorflow-estimator==2.8.0 -tensorflow-io-gcs-filesystem==0.32.0 +tensorflow-io-gcs-filesystem==0.37.0 tensorflow-probability==0.13.0 -termcolor==2.3.0 +termcolor==2.4.0 +tf_keras==2.16.0 tomli==2.0.1 -typing_extensions==4.6.3 -urllib3==1.26.16 -Werkzeug==2.2.3 -wrapt==1.14.1 -zipp==3.15.0 +typing_extensions==4.12.2 +urllib3==2.2.2 +Werkzeug==3.0.3 +wrapt==1.16.0 From b23f4733392eb34da59cfda08ffba9401a08f8ad Mon Sep 17 00:00:00 2001 From: Uri Granta Date: Sat, 22 Jun 2024 19:09:29 +0100 Subject: [PATCH 02/20] Start converting tf.keras to tf_keras --- docs/notebooks/deep_ensembles.pct.py | 7 ++-- .../models/keras/test_predictions.py | 6 +-- .../integration/test_bayesian_optimization.py | 5 ++- tests/unit/models/gpflux/test_interface.py | 5 ++- tests/unit/models/gpflux/test_models.py | 35 +++++++++-------- tests/unit/models/keras/test_architectures.py | 17 ++++---- tests/unit/models/keras/test_builders.py | 6 +-- tests/unit/models/keras/test_interface.py | 3 +- tests/unit/models/keras/test_models.py | 39 ++++++++++--------- tests/util/models/gpflux/models.py | 3 +- tests/util/models/keras/models.py | 5 ++- trieste/models/gpflux/interface.py | 3 +- trieste/models/gpflux/models.py | 25 ++++++------ trieste/models/keras/architectures.py | 21 +++++----- trieste/models/keras/builders.py | 4 +- trieste/models/keras/interface.py | 3 +- trieste/models/keras/models.py | 15 +++---- trieste/models/optimizer.py | 9 +++-- 18 files changed, 113 insertions(+), 98 deletions(-) diff --git a/docs/notebooks/deep_ensembles.pct.py b/docs/notebooks/deep_ensembles.pct.py index e7eef55571..5c57b602c6 100644 --- a/docs/notebooks/deep_ensembles.pct.py +++ b/docs/notebooks/deep_ensembles.pct.py @@ -72,6 +72,7 @@ def objective(x, error=True): # %% +from gpflow import tf_keras from trieste.models.keras import ( DeepEnsemble, KerasPredictor, @@ -94,7 +95,7 @@ def build_cubic_model(data: Dataset) -> DeepEnsemble: "epochs": 1000, "verbose": 0, } - optimizer = KerasOptimizer(tf.keras.optimizers.Adam(0.01), fit_args) + optimizer = KerasOptimizer(tf_keras.optimizers.Adam(0.01), fit_args) return DeepEnsemble(keras_ensemble, optimizer) @@ -205,11 +206,11 @@ def build_model(data: Dataset) -> DeepEnsemble: "batch_size": 10, "epochs": 1000, "callbacks": [ - tf.keras.callbacks.EarlyStopping(monitor="loss", patience=100) + tf_keras.callbacks.EarlyStopping(monitor="loss", patience=100) ], "verbose": 0, } - optimizer = KerasOptimizer(tf.keras.optimizers.Adam(0.001), fit_args) + optimizer = KerasOptimizer(tf_keras.optimizers.Adam(0.001), fit_args) return DeepEnsemble(keras_ensemble, optimizer) diff --git a/tests/integration/models/keras/test_predictions.py b/tests/integration/models/keras/test_predictions.py index 6347c928f6..b35218b9d5 100644 --- a/tests/integration/models/keras/test_predictions.py +++ b/tests/integration/models/keras/test_predictions.py @@ -16,7 +16,7 @@ import numpy as np import pytest -import tensorflow as tf +from gpflow.keras import tf_keras from tests.util.misc import hartmann_6_dataset, random_seed from trieste.models.keras import DeepEnsemble, build_keras_ensemble @@ -34,7 +34,7 @@ def test_neural_network_ensemble_predictions_close_to_actuals() -> None: "batch_size": 128, "epochs": 1500, "callbacks": [ - 
tf.keras.callbacks.EarlyStopping( + tf_keras.callbacks.EarlyStopping( monitor="loss", patience=100, restore_best_weights=True ) ], @@ -42,7 +42,7 @@ def test_neural_network_ensemble_predictions_close_to_actuals() -> None: } model = DeepEnsemble( keras_ensemble, - KerasOptimizer(tf.keras.optimizers.Adam(), fit_args), + KerasOptimizer(tf_keras.optimizers.Adam(), fit_args), ) model.optimize(example_data) predicted_means, _ = model.predict(example_data.query_points) diff --git a/tests/integration/test_bayesian_optimization.py b/tests/integration/test_bayesian_optimization.py index 6965253e6c..a5efcb4651 100644 --- a/tests/integration/test_bayesian_optimization.py +++ b/tests/integration/test_bayesian_optimization.py @@ -25,6 +25,7 @@ import pytest import tensorflow as tf from _pytest.mark import ParameterSet +from gpflow.keras import tf_keras from tests.util.misc import random_seed from trieste.acquisition import ( @@ -712,13 +713,13 @@ def patched_tf_cast(x: TensorType, dtype: tf.DType) -> TensorType: "batch_size": 20, "epochs": 200, "callbacks": [ - tf.keras.callbacks.EarlyStopping( + tf_keras.callbacks.EarlyStopping( monitor="loss", patience=25, restore_best_weights=True ) ], "verbose": 0, } - de_optimizer = KerasOptimizer(tf.keras.optimizers.Adam(0.01), fit_args) + de_optimizer = KerasOptimizer(tf_keras.optimizers.Adam(0.01), fit_args) model = DeepEnsemble(keras_ensemble, de_optimizer, **model_args) else: diff --git a/tests/unit/models/gpflux/test_interface.py b/tests/unit/models/gpflux/test_interface.py index b7b5fb860f..fa64efa216 100644 --- a/tests/unit/models/gpflux/test_interface.py +++ b/tests/unit/models/gpflux/test_interface.py @@ -22,6 +22,7 @@ import tensorflow as tf from check_shapes import inherit_check_shapes from gpflow.conditionals.util import sample_mvn +from gpflow.keras import tf_keras from gpflux.helpers import construct_basic_inducing_variables, construct_basic_kernel from gpflux.layers import GPLayer from gpflux.models import DeepGP @@ -53,11 +54,11 @@ def model_gpflux(self) -> DeepGP: return self._model_gpflux @property - def model_keras(self) -> tf.keras.Model: + def model_keras(self) -> tf_keras.Model: return self._model_keras @property - def optimizer(self) -> tf.keras.optimizers.Optimizer: + def optimizer(self) -> tf_keras.optimizers.Optimizer: return self._optimizer @inherit_check_shapes diff --git a/tests/unit/models/gpflux/test_models.py b/tests/unit/models/gpflux/test_models.py index 65abf611e5..9347db96fa 100644 --- a/tests/unit/models/gpflux/test_models.py +++ b/tests/unit/models/gpflux/test_models.py @@ -37,6 +37,7 @@ import numpy.testing as npt import pytest import tensorflow as tf +from gpflow.keras import tf_keras from gpflux.models import DeepGP from gpflux.models.deep_gp import sample_dgp from tensorflow.python.keras.callbacks import Callback @@ -70,8 +71,8 @@ def test_deep_gaussian_process_raises_for_non_tf_optimizer( def test_deep_gaussian_process_raises_for_keras_layer() -> None: - keras_layer_1 = tf.keras.layers.Dense(50, activation="relu") - keras_layer_2 = tf.keras.layers.Dense(2, activation="relu") + keras_layer_1 = tf_keras.layers.Dense(50, activation="relu") + keras_layer_2 = tf_keras.layers.Dense(2, activation="relu") kernel = gpflow.kernels.SquaredExponential() num_inducing = 5 @@ -268,7 +269,7 @@ def scheduler(epoch: int, lr: float) -> float: "epochs": epochs, "batch_size": 100, "verbose": 0, - "callbacks": tf.keras.callbacks.LearningRateScheduler(scheduler), + "callbacks": tf_keras.callbacks.LearningRateScheduler(scheduler), } optimizer = 
KerasOptimizer(tf.optimizers.Adam(init_lr), fit_args) @@ -296,7 +297,7 @@ def test_deep_gaussian_process_with_lr_scheduler( "verbose": 0, } - lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( + lr_schedule = tf_keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=init_lr, decay_steps=1, decay_rate=0.5 ) optimizer = KerasOptimizer(tf.optimizers.Adam(lr_schedule), fit_args) @@ -459,22 +460,22 @@ def test_deepgp_deep_copies_optimizer_state() -> None: "callbacks", [ [ - tf.keras.callbacks.CSVLogger("csv"), - tf.keras.callbacks.EarlyStopping(monitor="loss", patience=100), - tf.keras.callbacks.History(), - tf.keras.callbacks.LambdaCallback(lambda epoch, lr: lr), - tf.keras.callbacks.LearningRateScheduler(lambda epoch, lr: lr), - tf.keras.callbacks.ProgbarLogger(), - tf.keras.callbacks.ReduceLROnPlateau(), - tf.keras.callbacks.RemoteMonitor(), - tf.keras.callbacks.TensorBoard(), - tf.keras.callbacks.TerminateOnNaN(), + tf_keras.callbacks.CSVLogger("csv"), + tf_keras.callbacks.EarlyStopping(monitor="loss", patience=100), + tf_keras.callbacks.History(), + tf_keras.callbacks.LambdaCallback(lambda epoch, lr: lr), + tf_keras.callbacks.LearningRateScheduler(lambda epoch, lr: lr), + tf_keras.callbacks.ProgbarLogger(), + tf_keras.callbacks.ReduceLROnPlateau(), + tf_keras.callbacks.RemoteMonitor(), + tf_keras.callbacks.TensorBoard(), + tf_keras.callbacks.TerminateOnNaN(), ], pytest.param( [ - tf.keras.callbacks.experimental.BackupAndRestore("backup"), - tf.keras.callbacks.BaseLogger(), - tf.keras.callbacks.ModelCheckpoint("weights"), + tf_keras.callbacks.experimental.BackupAndRestore("backup"), + tf_keras.callbacks.BaseLogger(), + tf_keras.callbacks.ModelCheckpoint("weights"), ], marks=pytest.mark.skip(reason="callbacks currently causing optimize to fail"), ), diff --git a/tests/unit/models/keras/test_architectures.py b/tests/unit/models/keras/test_architectures.py index ca89b14e10..a34c7229e5 100644 --- a/tests/unit/models/keras/test_architectures.py +++ b/tests/unit/models/keras/test_architectures.py @@ -18,6 +18,7 @@ import pytest import tensorflow as tf import tensorflow_probability as tfp +from gpflow.keras import tf_keras from tests.util.misc import empty_dataset from tests.util.models.keras.models import trieste_keras_ensemble_model @@ -64,7 +65,7 @@ def test_keras_ensemble_model_attributes() -> None: example_data = empty_dataset([1], [1]) keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE) - assert isinstance(keras_ensemble.model, tf.keras.Model) + assert isinstance(keras_ensemble.model, tf_keras.Model) def test_keras_ensemble_ensemble_size_attributes(ensemble_size: int) -> None: @@ -94,7 +95,7 @@ def test_keras_ensemble_build_ensemble_seems_correct( keras_ensemble = trieste_keras_ensemble_model(example_data, ensemble_size, independent_normal) # basics - assert isinstance(keras_ensemble.model, tf.keras.Model) + assert isinstance(keras_ensemble.model, tf_keras.Model) assert keras_ensemble.model.built # check ensemble size @@ -150,14 +151,14 @@ def test_keras_ensemble_network_raises_on_incorrect_tensor_spec() -> None: _DummyKerasEnsembleNetwork( [1], tf.TensorSpec(shape=(1,), dtype=tf.float32), - tf.keras.losses.MeanSquaredError(), + tf_keras.losses.MeanSquaredError(), ) with pytest.raises(ValueError): _DummyKerasEnsembleNetwork( tf.TensorSpec(shape=(1,), dtype=tf.float32), [1], - tf.keras.losses.MeanSquaredError(), + tf_keras.losses.MeanSquaredError(), ) @@ -232,7 +233,7 @@ def test_gaussian_network_is_correctly_constructed( hidden_layer_args, ) 
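# connect_layers() returns the symbolic input and output tensors for a single
# ensemble member; the test wraps them in a standalone functional Model so that
# one network can be exercised in isolation.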
network_input, network_output = network.connect_layers() - network_built = tf.keras.Model(inputs=network_input, outputs=network_output) + network_built = tf_keras.Model(inputs=network_input, outputs=network_output) # check input shape assert network_input.shape[1:] == tf.TensorShape(query_point_shape) @@ -243,7 +244,7 @@ def test_gaussian_network_is_correctly_constructed( assert predictions.shape == tf.TensorShape([n_obs] + observation_shape) # check layers - assert isinstance(network_built.layers[0], tf.keras.layers.InputLayer) + assert isinstance(network_built.layers[0], tf_keras.layers.InputLayer) assert len(network_built.layers[1:-2]) == num_hidden_layers assert isinstance(network_built.layers[-1], tfp.layers.DistributionLambda) @@ -253,7 +254,7 @@ def test_multivariatenormaltril_layer_fails_to_serialilze() -> None: # (with different errors in TF2.4 and TF2.5). When that's fixed we can remove our workaround. layer = tfp.layers.MultivariateNormalTriL(1) with pytest.raises(Exception): - serialized = tf.keras.utils.serialize_keras_object(layer) - tf.keras.utils.deserialize_keras_object( + serialized = tf_keras.utils.serialize_keras_object(layer) + tf_keras.utils.deserialize_keras_object( serialized, custom_objects={"MultivariateNormalTriL": tfp.layers.MultivariateNormalTriL} ) diff --git a/tests/unit/models/keras/test_builders.py b/tests/unit/models/keras/test_builders.py index 38ab7926d7..5c8fe2e35b 100644 --- a/tests/unit/models/keras/test_builders.py +++ b/tests/unit/models/keras/test_builders.py @@ -15,14 +15,14 @@ from typing import Union import pytest -import tensorflow as tf import tensorflow_probability as tfp +from gpflow.keras import tf_keras from tests.util.misc import empty_dataset from trieste.models.keras import build_keras_ensemble -@pytest.mark.parametrize("units, activation", [(10, "relu"), (50, tf.keras.activations.tanh)]) +@pytest.mark.parametrize("units, activation", [(10, "relu"), (50, tf_keras.activations.tanh)]) @pytest.mark.parametrize("ensemble_size", [2, 5]) @pytest.mark.parametrize("independent_normal", [False, True]) @pytest.mark.parametrize("num_hidden_layers", [0, 1, 3]) @@ -32,7 +32,7 @@ def test_build_keras_ensemble( ensemble_size: int, num_hidden_layers: int, units: int, - activation: Union[str, tf.keras.layers.Activation], + activation: Union[str, tf_keras.layers.Activation], independent_normal: bool, ) -> None: example_data = empty_dataset([num_outputs], [num_outputs]) diff --git a/tests/unit/models/keras/test_interface.py b/tests/unit/models/keras/test_interface.py index 59df296ca5..87ad741abc 100644 --- a/tests/unit/models/keras/test_interface.py +++ b/tests/unit/models/keras/test_interface.py @@ -17,6 +17,7 @@ import gpflow import pytest import tensorflow as tf +from gpflow.keras import tf_keras from tests.util.misc import empty_dataset, raise_exc from trieste.models.keras import KerasPredictor @@ -25,7 +26,7 @@ class _DummyKerasPredictor(KerasPredictor): @property - def model(self) -> tf.keras.Model: + def model(self) -> tf_keras.Model: return raise_exc diff --git a/tests/unit/models/keras/test_models.py b/tests/unit/models/keras/test_models.py index d067e842ad..f66d696be5 100644 --- a/tests/unit/models/keras/test_models.py +++ b/tests/unit/models/keras/test_models.py @@ -24,6 +24,7 @@ import pytest import tensorflow as tf import tensorflow_probability as tfp +from gpflow.keras import tf_keras from tensorflow.python.keras.callbacks import Callback from tests.util.misc import ShapeLike, empty_dataset, random_seed @@ -178,7 +179,7 @@ def 
test_deep_ensemble_optimizer_changed_correctly() -> None: "batch_size": 10, } custom_optimizer = tf.optimizers.RMSprop() - custom_loss = tf.keras.losses.MeanSquaredError() + custom_loss = tf_keras.losses.MeanSquaredError() optimizer_wrapper = KerasOptimizer(custom_optimizer, custom_fit_args, custom_loss) keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE) @@ -243,7 +244,7 @@ def scheduler(epoch: int, lr: float) -> float: "epochs": epochs, "batch_size": 100, "verbose": 0, - "callbacks": tf.keras.callbacks.LearningRateScheduler(scheduler), + "callbacks": tf_keras.callbacks.LearningRateScheduler(scheduler), } optimizer = KerasOptimizer(tf.optimizers.Adam(init_lr), fit_args) model = DeepEnsemble(keras_ensemble, optimizer) @@ -272,7 +273,7 @@ def test_deep_ensemble_with_lr_scheduler() -> None: "verbose": 0, } - lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( + lr_schedule = tf_keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=init_lr, decay_steps=1, decay_rate=0.5 ) optimizer = KerasOptimizer(tf.optimizers.Adam(lr_schedule), fit_args) @@ -424,7 +425,7 @@ def test_deep_ensemble_optimize(ensemble_size: int, bootstrap_data: bool, epochs "epochs": epochs, "batch_size": 10, } - custom_loss = tf.keras.losses.MeanSquaredError() + custom_loss = tf_keras.losses.MeanSquaredError() optimizer_wrapper = KerasOptimizer(custom_optimizer, custom_fit_args, custom_loss) model = DeepEnsemble(keras_ensemble, optimizer_wrapper, bootstrap_data) @@ -658,22 +659,22 @@ def test_deep_ensemble_deep_copies_optimizer_state() -> None: "callbacks", [ [ - tf.keras.callbacks.CSVLogger("csv"), - tf.keras.callbacks.EarlyStopping(monitor="loss", patience=100), - tf.keras.callbacks.History(), - tf.keras.callbacks.LambdaCallback(lambda epoch, lr: lr), - tf.keras.callbacks.LearningRateScheduler(lambda epoch, lr: lr), - tf.keras.callbacks.ProgbarLogger(), - tf.keras.callbacks.ReduceLROnPlateau(), - tf.keras.callbacks.RemoteMonitor(), - tf.keras.callbacks.TensorBoard(), - tf.keras.callbacks.TerminateOnNaN(), + tf_keras.callbacks.CSVLogger("csv"), + tf_keras.callbacks.EarlyStopping(monitor="loss", patience=100), + tf_keras.callbacks.History(), + tf_keras.callbacks.LambdaCallback(lambda epoch, lr: lr), + tf_keras.callbacks.LearningRateScheduler(lambda epoch, lr: lr), + tf_keras.callbacks.ProgbarLogger(), + tf_keras.callbacks.ReduceLROnPlateau(), + tf_keras.callbacks.RemoteMonitor(), + tf_keras.callbacks.TensorBoard(), + tf_keras.callbacks.TerminateOnNaN(), ], pytest.param( [ - tf.keras.callbacks.experimental.BackupAndRestore("backup"), - tf.keras.callbacks.BaseLogger(), - tf.keras.callbacks.ModelCheckpoint("weights"), + tf_keras.callbacks.experimental.BackupAndRestore("backup"), + tf_keras.callbacks.BaseLogger(), + tf_keras.callbacks.ModelCheckpoint("weights"), ], marks=pytest.mark.skip(reason="callbacks currently causing optimize to fail"), ), @@ -704,12 +705,12 @@ def test_deep_ensemble_deep_copies_optimizer_callback_models() -> None: model.optimize(new_example_data) callback = model.optimizer.fit_args["callbacks"][0] - assert isinstance(callback, tf.keras.callbacks.EarlyStopping) + assert isinstance(callback, tf_keras.callbacks.EarlyStopping) assert callback.model is model.model model_copy = copy.deepcopy(model) callback_copy = model_copy.optimizer.fit_args["callbacks"][0] - assert isinstance(callback_copy, tf.keras.callbacks.EarlyStopping) + assert isinstance(callback_copy, tf_keras.callbacks.EarlyStopping) assert callback_copy.model is model_copy.model is not callback.model 
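# Keras callbacks keep a reference to the model they are fitted against, so a
# correct deepcopy must rebind EarlyStopping.model to the copied model: same
# weights (compared below), but a distinct object from the original's.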
npt.assert_equal(callback_copy.model.get_weights(), callback.model.get_weights()) diff --git a/tests/util/models/gpflux/models.py b/tests/util/models/gpflux/models.py index 254ca73c5b..412ed21972 100644 --- a/tests/util/models/gpflux/models.py +++ b/tests/util/models/gpflux/models.py @@ -21,6 +21,7 @@ import gpflow import tensorflow as tf +from gpflow.keras import tf_keras from gpflow.utilities import set_trainable from gpflux.architectures import Config, build_constant_input_dim_deep_gp from gpflux.helpers import construct_basic_kernel @@ -150,7 +151,7 @@ def scheduler(epoch: int, lr: float) -> float: "batch_size": batch_size, "epochs": epochs, "verbose": 0, - "callbacks": tf.keras.callbacks.LearningRateScheduler(scheduler), + "callbacks": tf_keras.callbacks.LearningRateScheduler(scheduler), } optimizer = KerasOptimizer(tf.optimizers.Adam(learning_rate), fit_args) diff --git a/tests/util/models/keras/models.py b/tests/util/models/keras/models.py index 8e40a9f71b..9b0c5e67cc 100644 --- a/tests/util/models/keras/models.py +++ b/tests/util/models/keras/models.py @@ -21,6 +21,7 @@ from typing import Any, Mapping, Optional, Tuple import tensorflow as tf +from gpflow.keras import tf_keras from packaging.version import Version from trieste.data import Dataset @@ -67,7 +68,7 @@ def trieste_deep_ensemble_model( ) -> Tuple[DeepEnsemble, KerasEnsemble, KerasOptimizer]: keras_ensemble = trieste_keras_ensemble_model(example_data, ensemble_size, independent_normal) - optimizer = tf.keras.optimizers.Adam() + optimizer = tf_keras.optimizers.Adam() fit_args = { "batch_size": 100, "epochs": 1, @@ -83,7 +84,7 @@ def trieste_deep_ensemble_model( return model, keras_ensemble, optimizer_wrapper -def keras_optimizer_weights(optimizer: tf.keras.optimizers.Optimizer) -> Optional[TensorType]: +def keras_optimizer_weights(optimizer: tf_keras.optimizers.Optimizer) -> Optional[TensorType]: # optimizer weight API was changed in TF 2.11: https://github.com/keras-team/keras/issues/16983 if Version(tf.__version__) < Version("2.11"): return optimizer.get_weights() diff --git a/trieste/models/gpflux/interface.py b/trieste/models/gpflux/interface.py index cd4d1b1b0e..e471dca763 100644 --- a/trieste/models/gpflux/interface.py +++ b/trieste/models/gpflux/interface.py @@ -19,6 +19,7 @@ import tensorflow as tf from check_shapes import inherit_check_shapes from gpflow.base import Module +from gpflow.keras import tf_keras from ...types import TensorType from ..interfaces import SupportsGetObservationNoise, SupportsPredictY @@ -51,7 +52,7 @@ def model_gpflux(self) -> Module: @property @abstractmethod - def model_keras(self) -> tf.keras.Model: + def model_keras(self) -> tf_keras.Model: """Returns the compiled Keras model for training.""" @property diff --git a/trieste/models/gpflux/models.py b/trieste/models/gpflux/models.py index 79a1777df5..3d01473d8c 100644 --- a/trieste/models/gpflux/models.py +++ b/trieste/models/gpflux/models.py @@ -22,6 +22,7 @@ import tensorflow as tf from check_shapes import inherit_check_shapes from gpflow.inducing_variables import InducingPoints +from gpflow.keras import tf_keras from gpflux.layers import GPLayer, LatentVariableLayer from gpflux.models import DeepGP from tensorflow.python.keras.callbacks import Callback @@ -129,7 +130,7 @@ def __init__( ) if not isinstance( - self.optimizer.optimizer.lr, tf.keras.optimizers.schedules.LearningRateSchedule + self.optimizer.optimizer.lr, tf_keras.optimizers.schedules.LearningRateSchedule ): self.original_lr = self.optimizer.optimizer.lr.numpy() @@ -146,7 
+147,7 @@ def scheduler(epoch: int, lr: float) -> float: "verbose": 0, "epochs": epochs, "batch_size": 1000, - "callbacks": [tf.keras.callbacks.LearningRateScheduler(scheduler)], + "callbacks": [tf_keras.callbacks.LearningRateScheduler(scheduler)], } if self.optimizer.metrics is None: @@ -156,12 +157,12 @@ def scheduler(epoch: int, lr: float) -> float: # inputs and targets need to be redone with a float64 dtype to avoid setting the keras # backend to float64, this is likely to be fixed in GPflux, see issue: # https://github.com/secondmind-labs/GPflux/issues/76 - self._model_gpflux.inputs = tf.keras.Input( + self._model_gpflux.inputs = tf_keras.Input( tuple(self._model_gpflux.inputs.shape[:-1]), name=self._model_gpflux.inputs.name, dtype=tf.float64, ) - self._model_gpflux.targets = tf.keras.Input( + self._model_gpflux.targets = tf_keras.Input( tuple(self._model_gpflux.targets.shape[:-1]), name=self._model_gpflux.targets.name, dtype=tf.float64, @@ -196,7 +197,7 @@ def __getstate__(self) -> dict[str, Any]: elif callback.model: callback.model = (callback.model.to_json(), callback.model.get_weights()) # don't pickle tensorboard writers either; they'll be recreated when needed - if isinstance(callback, tf.keras.callbacks.TensorBoard): + if isinstance(callback, tf_keras.callbacks.TensorBoard): tensorboard_writers.append(callback._writers) callback._writers = {} state["_optimizer"] = dill.dumps(state["_optimizer"]) @@ -209,7 +210,7 @@ def __getstate__(self) -> dict[str, Any]: for callback, model in zip(self._optimizer.fit_args.get("callbacks", []), saved_models): callback.model = model for callback, writers in zip( - (cb for cb in callbacks if isinstance(cb, tf.keras.callbacks.TensorBoard)), + (cb for cb in callbacks if isinstance(cb, tf_keras.callbacks.TensorBoard)), tensorboard_writers, ): callback._writers = writers @@ -246,12 +247,12 @@ def __setstate__(self, state: dict[str, Any]) -> None: # inputs and targets need to be redone with a float64 dtype to avoid setting the keras # backend to float64, this is likely to be fixed in GPflux, see issue: # https://github.com/secondmind-labs/GPflux/issues/76 - self._model_gpflux.inputs = tf.keras.Input( + self._model_gpflux.inputs = tf_keras.Input( tuple(self._model_gpflux.inputs.shape[:-1]), name=self._model_gpflux.inputs.name, dtype=tf.float64, ) - self._model_gpflux.targets = tf.keras.Input( + self._model_gpflux.targets = tf_keras.Input( tuple(self._model_gpflux.targets.shape[:-1]), name=self._model_gpflux.targets.name, dtype=tf.float64, @@ -265,7 +266,7 @@ def __setstate__(self, state: dict[str, Any]) -> None: callback.set_model(self._model_keras) elif callback.model: model_json, weights = callback.model - model = tf.keras.models.model_from_json(model_json) + model = tf_keras.models.model_from_json(model_json) model.set_weights(weights) callback.set_model(model) @@ -284,7 +285,7 @@ def __setstate__(self, state: dict[str, Any]) -> None: self._model_keras.history.set_model(self._model_keras) elif self._model_keras.history.model: model_json, weights = self._model_keras.history.model - model = tf.keras.models.model_from_json(model_json) + model = tf_keras.models.model_from_json(model_json) model.set_weights(weights) self._model_keras.history.set_model(model) @@ -301,7 +302,7 @@ def model_gpflux(self) -> DeepGP: return self._model_gpflux @property - def model_keras(self) -> tf.keras.Model: + def model_keras(self) -> tf_keras.Model: return self._model_keras @inherit_check_shapes @@ -394,7 +395,7 @@ def optimize(self, dataset: Dataset) -> 
keras.callbacks.History: # different. Therefore, we make sure the learning rate is set back to its initial value. # However, this is not needed for `LearningRateSchedule` instances. if not isinstance( - self.optimizer.optimizer.lr, tf.keras.optimizers.schedules.LearningRateSchedule + self.optimizer.optimizer.lr, tf_keras.optimizers.schedules.LearningRateSchedule ): self.optimizer.optimizer.lr.assign(self.original_lr) diff --git a/trieste/models/keras/architectures.py b/trieste/models/keras/architectures.py index 76f7c76f1c..89465fefcf 100644 --- a/trieste/models/keras/architectures.py +++ b/trieste/models/keras/architectures.py @@ -26,6 +26,7 @@ import numpy as np import tensorflow as tf import tensorflow_probability as tfp +from gpflow.keras import tf_keras try: from keras.src.saving.serialization_lib import SafeModeScope @@ -94,7 +95,7 @@ def __repr__(self) -> str: return f"KerasEnsemble({self._networks!r})" @property - def model(self) -> tf.keras.Model: + def model(self) -> tf_keras.Model: """Returns built but uncompiled Keras ensemble model.""" return self._model @@ -106,7 +107,7 @@ def ensemble_size(self) -> int: """ return len(self._networks) - def _build_ensemble(self) -> tf.keras.Model: + def _build_ensemble(self) -> tf_keras.Model: """ Builds the ensemble model by combining all the individual networks in a single Keras model. This method relies on ``connect_layers`` method of :class:`KerasEnsembleNetwork` objects @@ -116,7 +117,7 @@ def _build_ensemble(self) -> tf.keras.Model: """ inputs, outputs = zip(*[network.connect_layers() for network in self._networks]) - return tf.keras.Model(inputs=inputs, outputs=outputs) + return tf_keras.Model(inputs=inputs, outputs=outputs) def __getstate__(self) -> dict[str, Any]: # When pickling use to_json to save the model. 
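For orientation, the pickling scheme used in this file (and mirrored in models.py below) reduces to the round-trip sketched here. This is an illustrative sketch rather than the patch's code: the helper names are invented, and the `custom_objects` entry mirrors the one `__setstate__` passes, since the ensemble heads use trieste's `MultivariateNormalTriL` layer:

    from gpflow.keras import tf_keras
    from trieste.models.keras.architectures import MultivariateNormalTriL

    def to_payload(model: tf_keras.Model) -> tuple:
        # The JSON architecture and the raw weight arrays are both picklable,
        # unlike the live Keras model object itself.
        return model.to_json(), model.get_weights()

    def from_payload(payload: tuple) -> tf_keras.Model:
        model_json, weights = payload
        model = tf_keras.models.model_from_json(
            model_json,
            custom_objects={"MultivariateNormalTriL": MultivariateNormalTriL},
        )
        model.set_weights(weights)
        return model
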
@@ -148,7 +149,7 @@ def __setstate__(self, state: dict[str, Any]) -> None: # TF 2.15 disallows loading lambdas without "safe-mode" being disabled # unfortunately, tfp.layers.DistributionLambda seems to use lambdas with SafeModeScope(False): - self._model = tf.keras.models.model_from_json( + self._model = tf_keras.models.model_from_json( state["_model"], custom_objects={"MultivariateNormalTriL": MultivariateNormalTriL} ) self._model.set_weights(state["_weights"]) @@ -160,7 +161,7 @@ def __setstate__(self, state: dict[str, Any]) -> None: self._model.history.set_model(self._model) elif self._model.history.model: model_json, weights = self._model.history.model - model = tf.keras.models.model_from_json( + model = tf_keras.models.model_from_json( model_json, custom_objects={"MultivariateNormalTriL": MultivariateNormalTriL}, ) @@ -297,8 +298,8 @@ def __init__( self._hidden_layer_args = hidden_layer_args self._independent = independent - def _gen_input_tensor(self) -> tf.keras.Input: - input_tensor = tf.keras.Input( + def _gen_input_tensor(self) -> tf_keras.Input: + input_tensor = tf_keras.Input( shape=self.input_tensor_spec.shape, dtype=self.input_tensor_spec.dtype, name=self.input_layer_name, @@ -308,7 +309,7 @@ def _gen_input_tensor(self) -> tf.keras.Input: def _gen_hidden_layers(self, input_tensor: tf.Tensor) -> tf.Tensor: for index, hidden_layer_args in enumerate(self._hidden_layer_args): layer_name = f"{self.network_name}dense_{index}" - layer = tf.keras.layers.Dense( + layer = tf_keras.layers.Dense( **hidden_layer_args, name=layer_name, dtype=input_tensor.dtype.name ) input_tensor = layer(input_tensor) @@ -319,7 +320,7 @@ def _gen_multi_output_layer(self, input_tensor: tf.Tensor) -> tf.Tensor: dist_layer = tfp.layers.IndependentNormal if self._independent else MultivariateNormalTriL n_params = dist_layer.params_size(self.flattened_output_shape) - parameter_layer = tf.keras.layers.Dense( + parameter_layer = tf_keras.layers.Dense( n_params, name=self.network_name + "dense_parameters", dtype=input_tensor.dtype.name )(input_tensor) @@ -333,7 +334,7 @@ def _gen_multi_output_layer(self, input_tensor: tf.Tensor) -> tf.Tensor: return distribution def _gen_single_output_layer(self, input_tensor: tf.Tensor) -> tf.Tensor: - parameter_layer = tf.keras.layers.Dense( + parameter_layer = tf_keras.layers.Dense( 2, name=self.network_name + "dense_parameters", dtype=input_tensor.dtype.name )(input_tensor) diff --git a/trieste/models/keras/builders.py b/trieste/models/keras/builders.py index 8cd608e317..44963f32c5 100644 --- a/trieste/models/keras/builders.py +++ b/trieste/models/keras/builders.py @@ -22,7 +22,7 @@ from typing import Union -import tensorflow as tf +from gpflow.keras import tf_keras from ...data import Dataset from .architectures import GaussianNetwork, KerasEnsemble @@ -34,7 +34,7 @@ def build_keras_ensemble( ensemble_size: int = 5, num_hidden_layers: int = 2, units: int = 25, - activation: Union[str, tf.keras.layers.Activation] = "relu", + activation: Union[str, tf_keras.layers.Activation] = "relu", independent_normal: bool = False, ) -> KerasEnsemble: """ diff --git a/trieste/models/keras/interface.py b/trieste/models/keras/interface.py index 18a4afbc48..fa48b0db1a 100644 --- a/trieste/models/keras/interface.py +++ b/trieste/models/keras/interface.py @@ -20,6 +20,7 @@ import tensorflow as tf import tensorflow_probability as tfp from check_shapes import inherit_check_shapes +from gpflow.keras import tf_keras from typing_extensions import Protocol, runtime_checkable from ...data import 
Dataset @@ -53,7 +54,7 @@ def __init__(self, optimizer: Optional[KerasOptimizer] = None): @property @abstractmethod - def model(self) -> tf.keras.Model: + def model(self) -> tf_keras.Model: """The compiled Keras model.""" raise NotImplementedError diff --git a/trieste/models/keras/models.py b/trieste/models/keras/models.py index 10e27acc6f..563ce8de43 100644 --- a/trieste/models/keras/models.py +++ b/trieste/models/keras/models.py @@ -23,6 +23,7 @@ import tensorflow_probability as tfp import tensorflow_probability.python.distributions as tfd from check_shapes import inherit_check_shapes +from gpflow.keras import tf_keras from tensorflow.python.keras.callbacks import Callback from ... import logging @@ -135,7 +136,7 @@ def __init__( "epochs": 3000, "batch_size": 16, "callbacks": [ - tf.keras.callbacks.EarlyStopping( + tf_keras.callbacks.EarlyStopping( monitor="loss", patience=50, restore_best_weights=True ) ], @@ -155,7 +156,7 @@ def __init__( ) if not isinstance( - self.optimizer.optimizer.lr, tf.keras.optimizers.schedules.LearningRateSchedule + self.optimizer.optimizer.lr, tf_keras.optimizers.schedules.LearningRateSchedule ): self.original_lr = self.optimizer.optimizer.lr.numpy() self._absolute_epochs = 0 @@ -173,7 +174,7 @@ def __repr__(self) -> str: ) @property - def model(self) -> tf.keras.Model: + def model(self) -> tf_keras.Model: """Returns compiled Keras ensemble model.""" return self._model.model @@ -413,7 +414,7 @@ def optimize(self, dataset: Dataset) -> keras.callbacks.History: # different. Therefore, we make sure the learning rate is set back to its initial value. # However, this is not needed for `LearningRateSchedule` instances. if not isinstance( - self.optimizer.optimizer.lr, tf.keras.optimizers.schedules.LearningRateSchedule + self.optimizer.optimizer.lr, tf_keras.optimizers.schedules.LearningRateSchedule ): self.optimizer.optimizer.lr.assign(self.original_lr) @@ -499,7 +500,7 @@ def __getstate__(self) -> dict[str, Any]: elif callback.model: callback.model = (callback.model.to_json(), callback.model.get_weights()) # don't pickle tensorboard writers either; they'll be recreated when needed - if isinstance(callback, tf.keras.callbacks.TensorBoard): + if isinstance(callback, tf_keras.callbacks.TensorBoard): tensorboard_writers.append(callback._writers) callback._writers = {} state["_optimizer"] = dill.dumps(state["_optimizer"]) @@ -512,7 +513,7 @@ def __getstate__(self) -> dict[str, Any]: for callback, model in zip(callbacks, saved_models): callback.model = model for callback, writers in zip( - (cb for cb in callbacks if isinstance(cb, tf.keras.callbacks.TensorBoard)), + (cb for cb in callbacks if isinstance(cb, tf_keras.callbacks.TensorBoard)), tensorboard_writers, ): callback._writers = writers @@ -534,7 +535,7 @@ def __setstate__(self, state: dict[str, Any]) -> None: callback.set_model(self.model) elif callback.model: model_json, weights = callback.model - model = tf.keras.models.model_from_json( + model = tf_keras.models.model_from_json( model_json, custom_objects={"MultivariateNormalTriL": MultivariateNormalTriL}, ) diff --git a/trieste/models/optimizer.py b/trieste/models/optimizer.py index a9db680af0..c17de8d0e3 100644 --- a/trieste/models/optimizer.py +++ b/trieste/models/optimizer.py @@ -28,6 +28,7 @@ import scipy import tensorflow as tf import tensorflow_probability as tfp +from gpflow.keras import tf_keras from ..data import Dataset from ..types import TensorType @@ -160,7 +161,7 @@ def __deepcopy__(self, memo: dict[int, object]) -> BatchOptimizer: for k, v in 
self.__dict__.items(): if ( k == "optimizer" - and isinstance(v, tf.keras.optimizers.Optimizer) + and isinstance(v, tf_keras.optimizers.Optimizer) and hasattr(v, "_distribution_strategy") ): # avoid copying distribution strategy: reuse it instead @@ -180,7 +181,7 @@ def __deepcopy__(self, memo: dict[int, object]) -> BatchOptimizer: class KerasOptimizer: """Optimizer wrapper for training models implemented with Keras.""" - optimizer: tf.keras.optimizers.Optimizer + optimizer: tf_keras.optimizers.Optimizer """ The underlying optimizer to use for training the model. """ fit_args: dict[str, Any] = field(default_factory=lambda: {}) @@ -192,12 +193,12 @@ class KerasOptimizer: loss: Optional[ Union[ - tf.keras.losses.Loss, Callable[[TensorType, tfp.distributions.Distribution], TensorType] + tf_keras.losses.Loss, Callable[[TensorType, tfp.distributions.Distribution], TensorType] ] ] = None """ Optional loss function for training the model. """ - metrics: Optional[list[tf.keras.metrics.Metric]] = None + metrics: Optional[list[tf_keras.metrics.Metric]] = None """ Optional metrics for monitoring the performance of the network. """ def __deepcopy__(self, memo: dict[int, object]) -> KerasOptimizer: From 6aeaac8548057064e7046ef3894d0270fb4ecc42 Mon Sep 17 00:00:00 2001 From: Uri Granta Date: Sat, 22 Jun 2024 19:22:20 +0100 Subject: [PATCH 03/20] Convert tf.optimizers too --- docs/notebooks/failure_ego.pct.py | 4 ++- ...ing_using_sparse_gaussian_processes.pct.py | 3 +- tests/unit/models/gpflow/test_models.py | 33 ++++++++++--------- tests/unit/models/gpflow/test_utils.py | 3 +- tests/unit/models/gpflux/test_interface.py | 4 +-- tests/unit/models/gpflux/test_models.py | 12 +++---- tests/unit/models/keras/test_architectures.py | 2 +- tests/unit/models/keras/test_interface.py | 7 ++-- tests/unit/models/keras/test_models.py | 18 +++++----- tests/util/models/gpflux/models.py | 2 +- trieste/models/gpflow/models.py | 9 ++--- trieste/models/gpflow/utils.py | 3 +- trieste/models/gpflux/interface.py | 3 +- trieste/models/gpflux/models.py | 2 +- trieste/models/keras/interface.py | 4 +-- 15 files changed, 58 insertions(+), 51 deletions(-) diff --git a/docs/notebooks/failure_ego.pct.py b/docs/notebooks/failure_ego.pct.py index 03546db85a..8be340529a 100644 --- a/docs/notebooks/failure_ego.pct.py +++ b/docs/notebooks/failure_ego.pct.py @@ -109,6 +109,8 @@ def observer(x): # We'll train the GPR model with the default Scipy-based L-BFGS optimizer, and the VGP model with the custom algorithm above. 
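# The conversion pattern applied across these commits, sketched here for
# reference (the shim's exact resolution logic lives in gpflow and is assumed,
# not shown): gpflow re-exports a Keras 2 implementation as `tf_keras`, so code
# no longer depends on which Keras major version `tf.keras` points at.
#
#   before:  opt = tf.optimizers.Adam(1e-3)
#   after:   from gpflow.keras import tf_keras
#            opt = tf_keras.optimizers.Adam(1e-3)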
# %% +from gpflow.keras import tf_keras + from trieste.models import TrainableProbabilisticModel from trieste.models.gpflow.models import ( GaussianProcessRegression, @@ -122,7 +124,7 @@ def observer(x): OBJECTIVE: GaussianProcessRegression(regression_model), FAILURE: VariationalGaussianProcess( classification_model, - BatchOptimizer(tf.optimizers.Adam(1e-3)), + BatchOptimizer(tf_keras.optimizers.Adam(1e-3)), use_natgrads=True, ), } diff --git a/docs/notebooks/scalable_thompson_sampling_using_sparse_gaussian_processes.pct.py b/docs/notebooks/scalable_thompson_sampling_using_sparse_gaussian_processes.pct.py index 079dbd0912..bb1e3ef270 100644 --- a/docs/notebooks/scalable_thompson_sampling_using_sparse_gaussian_processes.pct.py +++ b/docs/notebooks/scalable_thompson_sampling_using_sparse_gaussian_processes.pct.py @@ -13,6 +13,7 @@ # %% import numpy as np import tensorflow as tf +from gpflow.keras import tf_keras np.random.seed(1793) tf.random.set_seed(1793) @@ -64,7 +65,7 @@ def noisy_hartmann_6( num_rff_features=1_000, inducing_point_selector=inducing_point_selector, optimizer=BatchOptimizer( - tf.optimizers.Adam(0.1), max_iter=100, batch_size=50, compile=True + tf_keras.optimizers.Adam(0.1), max_iter=100, batch_size=50, compile=True ), ) diff --git a/tests/unit/models/gpflow/test_models.py b/tests/unit/models/gpflow/test_models.py index d7ff634070..edf18eb58f 100644 --- a/tests/unit/models/gpflow/test_models.py +++ b/tests/unit/models/gpflow/test_models.py @@ -41,6 +41,7 @@ SeparateIndependentInducingVariables, SharedIndependentInducingVariables, ) +from gpflow.keras import tf_keras from gpflow.models import SGPR, SVGP, VGP from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, random_seed @@ -181,7 +182,7 @@ def test_gpflow_wrappers_ref_optimize(gpflow_interface_factory: ModelFactoryType .prefetch(tf.data.experimental.AUTOTUNE) .repeat() ) - tf.optimizers.Adam().minimize( + tf_keras.optimizers.Adam().minimize( reference_model.training_loss_closure(data=data_iter, compile=False), reference_model.trainable_variables, ) @@ -404,7 +405,7 @@ def test_gaussian_process_regression_raises_for_invalid_init() -> None: GaussianProcessRegression(gpr_model(x, y), optimizer=optimizer1) with pytest.raises(ValueError): - optimizer2 = Optimizer(tf.optimizers.Adam()) + optimizer2 = Optimizer(tf_keras.optimizers.Adam()) GaussianProcessRegression(gpr_model(x, y), optimizer=optimizer2) @@ -789,7 +790,7 @@ def test_sparse_gaussian_process_regression_raises_for_invalid_init() -> None: SparseGaussianProcessRegression(sgpr_model(x, y), optimizer=optimizer1) with pytest.raises(ValueError): - optimizer2 = Optimizer(tf.optimizers.Adam()) + optimizer2 = Optimizer(tf_keras.optimizers.Adam()) SparseGaussianProcessRegression(sgpr_model(x, y), optimizer=optimizer2) @@ -1049,7 +1050,7 @@ def test_variational_gaussian_process_raises_for_invalid_init() -> None: VariationalGaussianProcess(vgp_model(x, y), optimizer=optimizer, use_natgrads=True) with pytest.raises(ValueError): - optimizer = Optimizer(tf.optimizers.Adam()) + optimizer = Optimizer(tf_keras.optimizers.Adam()) VariationalGaussianProcess(vgp_model(x, y), optimizer=optimizer, use_natgrads=False) @@ -1153,7 +1154,7 @@ def test_variational_gaussian_process_trajectory_sampler_has_correct_samples( ) -> None: x_observed = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float()) y_observed = _3x_plus_gaussian_noise(x_observed) - optimizer = BatchOptimizer(tf.optimizers.Adam(), max_iter=20) + optimizer = BatchOptimizer(tf_keras.optimizers.Adam(), max_iter=20) 
+    optimizer = BatchOptimizer(tf_keras.optimizers.Adam(), max_iter=20)
likelihood = gpflow.likelihoods.Gaussian(noise_var) kernel = gpflow.kernels.Matern32(lengthscales=0.2) if use_mean_function: @@ -1241,7 +1242,7 @@ def test_variational_gaussian_process_optimize_with_and_without_natgrads( if use_natgrads: optimizer = BatchOptimizer( - tf.optimizers.Adam(), + tf_keras.optimizers.Adam(), max_iter=10, batch_size=10, dataset_builder=batcher, @@ -1270,7 +1271,7 @@ class DummyBatchOptimizer(BatchOptimizer): def optimize(self, model: tf.Module, dataset: Dataset) -> None: pass - optimizer = DummyBatchOptimizer(tf.optimizers.Adam(), compile=compile, max_iter=10) + optimizer = DummyBatchOptimizer(tf_keras.optimizers.Adam(), compile=compile, max_iter=10) model = VariationalGaussianProcess( vgp_matern_model(x_observed[:10], y_observed[:10]), optimizer=optimizer, use_natgrads=True @@ -1305,7 +1306,7 @@ def test_variational_gaussian_process_default_optimizer_is_correct(use_natgrads: if use_natgrads: assert isinstance(model.optimizer, BatchOptimizer) - assert isinstance(model.optimizer.optimizer, tf.optimizers.Optimizer) + assert isinstance(model.optimizer.optimizer, tf_keras.optimizers.Optimizer) else: assert isinstance(model.optimizer, Optimizer) assert isinstance(model.optimizer.optimizer, gpflow.optimizers.Scipy) @@ -1488,7 +1489,7 @@ def test_sparse_variational_optimize_with_defaults() -> None: y_observed = _3x_plus_gaussian_noise(x_observed) data = x_observed, y_observed dataset = Dataset(*data) - optimizer = BatchOptimizer(tf.optimizers.Adam(), max_iter=20) + optimizer = BatchOptimizer(tf_keras.optimizers.Adam(), max_iter=20) model = SparseVariational(svgp_model(x_observed, y_observed), optimizer=optimizer) loss = model.model.training_loss(data) model.optimize(dataset) @@ -1502,7 +1503,7 @@ def test_sparse_variational_optimize(batcher: DatasetTransformer, compile: bool) dataset = Dataset(*data) optimizer = BatchOptimizer( - tf.optimizers.Adam(), + tf_keras.optimizers.Adam(), max_iter=10, batch_size=10, dataset_builder=batcher, @@ -1536,7 +1537,7 @@ def test_sparse_variational_trajectory_sampler_has_correct_samples( mean = gpflow.mean_functions.Zero() svgp = svgp_model_by_type(x, kernel_type + "+shared", whiten, len(x), noise_var, mean) - optimizer = BatchOptimizer(tf.optimizers.Adam(1.0), max_iter=10) + optimizer = BatchOptimizer(tf_keras.optimizers.Adam(1.0), max_iter=10) model = SparseVariational(svgp, optimizer=optimizer) model.update(Dataset(x, y)) model.optimize(Dataset(x, y)) @@ -1572,7 +1573,7 @@ def test_sparse_variational_default_optimizer_is_correct() -> None: model = SparseVariational(svgp_model(x_observed, y_observed)) assert isinstance(model.optimizer, BatchOptimizer) - assert isinstance(model.optimizer.optimizer, tf.optimizers.Optimizer) + assert isinstance(model.optimizer.optimizer, tf_keras.optimizers.Optimizer) def test_sparse_variational_raises_for_invalid_init() -> None: @@ -1590,7 +1591,7 @@ def test_sparse_variational_raises_for_invalid_init() -> None: SparseVariational(svgp_model(x_observed, y_observed), optimizer=optimizer1) with pytest.raises(ValueError): - optimizer2 = Optimizer(tf.optimizers.Adam()) + optimizer2 = Optimizer(tf_keras.optimizers.Adam()) SparseVariational(svgp_model(x_observed, y_observed), optimizer=optimizer2) @@ -1607,7 +1608,9 @@ def test_sparse_variational_pairwise_covariance_for_non_whitened( y2 = y1 * 0.5 svgp = svgp_model_by_type(x, mo_type, whiten) - model = SparseVariational(svgp, BatchOptimizer(tf.optimizers.Adam(), max_iter=3, batch_size=10)) + model = SparseVariational( + svgp, 
BatchOptimizer(tf_keras.optimizers.Adam(), max_iter=3, batch_size=10) + ) model.model.whiten = whiten model.optimize(Dataset(x, tf.concat([y1, y2], axis=-1))) @@ -1657,7 +1660,7 @@ def test_sparse_variational_inducing_updates_preserves_posterior( inducing_point_selector = DummyInducingPointSelector(xnew) model = SparseVariational( svgp, - BatchOptimizer(tf.optimizers.Adam(), max_iter=3, batch_size=10), + BatchOptimizer(tf_keras.optimizers.Adam(), max_iter=3, batch_size=10), inducing_point_selector=inducing_point_selector, ) diff --git a/tests/unit/models/gpflow/test_utils.py b/tests/unit/models/gpflow/test_utils.py index 0e9b3b98ff..fcbbf5f9db 100644 --- a/tests/unit/models/gpflow/test_utils.py +++ b/tests/unit/models/gpflow/test_utils.py @@ -25,6 +25,7 @@ import pytest import tensorflow as tf import tensorflow_probability as tfp +from gpflow.keras import tf_keras from tests.util.misc import random_seed from tests.util.models.gpflow.models import ModelFactoryType @@ -327,5 +328,5 @@ def test_check_optimizer_raises_for_invalid_optimizer_wrapper_combination() -> N check_optimizer(optimizer1) with pytest.raises(ValueError): - optimizer2 = Optimizer(tf.optimizers.Adam()) + optimizer2 = Optimizer(tf_keras.optimizers.Adam()) check_optimizer(optimizer2) diff --git a/tests/unit/models/gpflux/test_interface.py b/tests/unit/models/gpflux/test_interface.py index fa64efa216..976203517c 100644 --- a/tests/unit/models/gpflux/test_interface.py +++ b/tests/unit/models/gpflux/test_interface.py @@ -36,13 +36,13 @@ class _QuadraticPredictor(GPfluxPredictor): def __init__( self, - optimizer: tf.optimizers.Optimizer | None = None, + optimizer: tf_keras.optimizers.Optimizer | None = None, likelihood: gpflow.likelihoods.Likelihood = gpflow.likelihoods.Gaussian(0.01), ): super().__init__(optimizer=optimizer) if optimizer is None: - self._optimizer = tf.optimizers.Adam() + self._optimizer = tf_keras.optimizers.Adam() else: self._optimizer = optimizer self._model_gpflux = _QuadraticGPModel(likelihood=likelihood) diff --git a/tests/unit/models/gpflux/test_models.py b/tests/unit/models/gpflux/test_models.py index 9347db96fa..a34bd31b6c 100644 --- a/tests/unit/models/gpflux/test_models.py +++ b/tests/unit/models/gpflux/test_models.py @@ -166,7 +166,7 @@ def test_deep_gaussian_process_optimize( dataset = Dataset(*data) fit_args = {"batch_size": batch_size, "epochs": 10, "verbose": 0} - optimizer = KerasOptimizer(tf.optimizers.Adam(), fit_args) + optimizer = KerasOptimizer(tf_keras.optimizers.Adam(), fit_args) model = DeepGaussianProcess(two_layer_model(x_observed), optimizer) elbo = model.model_gpflux.elbo(data) @@ -271,7 +271,7 @@ def scheduler(epoch: int, lr: float) -> float: "verbose": 0, "callbacks": tf_keras.callbacks.LearningRateScheduler(scheduler), } - optimizer = KerasOptimizer(tf.optimizers.Adam(init_lr), fit_args) + optimizer = KerasOptimizer(tf_keras.optimizers.Adam(init_lr), fit_args) model = DeepGaussianProcess(two_layer_model(x), optimizer) @@ -300,7 +300,7 @@ def test_deep_gaussian_process_with_lr_scheduler( lr_schedule = tf_keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=init_lr, decay_steps=1, decay_rate=0.5 ) - optimizer = KerasOptimizer(tf.optimizers.Adam(lr_schedule), fit_args) + optimizer = KerasOptimizer(tf_keras.optimizers.Adam(lr_schedule), fit_args) model = DeepGaussianProcess(two_layer_model(x), optimizer) optimize_model_and_save_result(model, Dataset(x, y)) @@ -325,7 +325,7 @@ def test_deep_gaussian_process_default_optimizer_is_correct( } assert isinstance(model.optimizer, 
KerasOptimizer) - assert isinstance(model.optimizer.optimizer, tf.optimizers.Optimizer) + assert isinstance(model.optimizer.optimizer, tf_keras.optimizers.Optimizer) assert model_fit_args == fit_args @@ -347,7 +347,7 @@ class DummySubClass(DeepGaussianProcess): } assert isinstance(model.optimizer, KerasOptimizer) - assert isinstance(model.optimizer.optimizer, tf.optimizers.Optimizer) + assert isinstance(model.optimizer.optimizer, tf_keras.optimizers.Optimizer) assert model_fit_args == fit_args @@ -530,7 +530,7 @@ def test_deepgp_log( model = DeepGaussianProcess( single_layer_dgp_model(x_observed), - KerasOptimizer(tf.optimizers.Adam(), {"batch_size": 200, "epochs": 3, "verbose": 0}), + KerasOptimizer(tf_keras.optimizers.Adam(), {"batch_size": 200, "epochs": 3, "verbose": 0}), ) model.optimize(dataset) diff --git a/tests/unit/models/keras/test_architectures.py b/tests/unit/models/keras/test_architectures.py index a34c7229e5..57af6f6fac 100644 --- a/tests/unit/models/keras/test_architectures.py +++ b/tests/unit/models/keras/test_architectures.py @@ -134,7 +134,7 @@ def test_keras_ensemble_can_be_compiled() -> None: example_data = empty_dataset([1], [1]) keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE) - keras_ensemble.model.compile(tf.optimizers.Adam(), negative_log_likelihood) + keras_ensemble.model.compile(tf_keras.optimizers.Adam(), negative_log_likelihood) assert keras_ensemble.model.compiled_loss is not None assert keras_ensemble.model.compiled_metrics is not None diff --git a/tests/unit/models/keras/test_interface.py b/tests/unit/models/keras/test_interface.py index 87ad741abc..92891fd02d 100644 --- a/tests/unit/models/keras/test_interface.py +++ b/tests/unit/models/keras/test_interface.py @@ -16,7 +16,6 @@ import gpflow import pytest -import tensorflow as tf from gpflow.keras import tf_keras from tests.util.misc import empty_dataset, raise_exc @@ -40,13 +39,13 @@ def test_keras_predictor_default_optimizer_is_correct() -> None: model = _DummyKerasPredictor() assert isinstance(model._optimizer, KerasOptimizer) - assert isinstance(model._optimizer.optimizer, tf.optimizers.Adam) + assert isinstance(model._optimizer.optimizer, tf_keras.optimizers.Adam) assert isinstance(model.optimizer, KerasOptimizer) - assert isinstance(model.optimizer.optimizer, tf.optimizers.Adam) + assert isinstance(model.optimizer.optimizer, tf_keras.optimizers.Adam) def test_keras_predictor_check_optimizer_property() -> None: - optimizer = KerasOptimizer(tf.optimizers.RMSprop()) + optimizer = KerasOptimizer(tf_keras.optimizers.RMSprop()) model = _DummyKerasPredictor(optimizer) assert model.optimizer == optimizer diff --git a/tests/unit/models/keras/test_models.py b/tests/unit/models/keras/test_models.py index f66d696be5..75df77d9c4 100644 --- a/tests/unit/models/keras/test_models.py +++ b/tests/unit/models/keras/test_models.py @@ -107,10 +107,10 @@ def _ensemblise_data( return inputs, outputs -@pytest.mark.parametrize("optimizer", [tf.optimizers.Adam(), tf.optimizers.RMSprop()]) +@pytest.mark.parametrize("optimizer", [tf_keras.optimizers.Adam(), tf_keras.optimizers.RMSprop()]) @pytest.mark.parametrize("diversify", [False, True]) def test_deep_ensemble_repr( - optimizer: tf.optimizers.Optimizer, bootstrap_data: bool, diversify: bool + optimizer: tf_keras.optimizers.Optimizer, bootstrap_data: bool, diversify: bool ) -> None: example_data = empty_dataset([1], [1]) @@ -165,7 +165,7 @@ def test_deep_ensemble_default_optimizer_is_correct() -> None: del model.optimizer.fit_args["callbacks"] 
assert isinstance(model.optimizer, KerasOptimizer) - assert isinstance(model.optimizer.optimizer, tf.optimizers.Optimizer) + assert isinstance(model.optimizer.optimizer, tf_keras.optimizers.Optimizer) assert model.optimizer.fit_args == default_fit_args assert model.optimizer.loss == default_loss @@ -178,7 +178,7 @@ def test_deep_ensemble_optimizer_changed_correctly() -> None: "epochs": 10, "batch_size": 10, } - custom_optimizer = tf.optimizers.RMSprop() + custom_optimizer = tf_keras.optimizers.RMSprop() custom_loss = tf_keras.losses.MeanSquaredError() optimizer_wrapper = KerasOptimizer(custom_optimizer, custom_fit_args, custom_loss) @@ -246,7 +246,7 @@ def scheduler(epoch: int, lr: float) -> float: "verbose": 0, "callbacks": tf_keras.callbacks.LearningRateScheduler(scheduler), } - optimizer = KerasOptimizer(tf.optimizers.Adam(init_lr), fit_args) + optimizer = KerasOptimizer(tf_keras.optimizers.Adam(init_lr), fit_args) model = DeepEnsemble(keras_ensemble, optimizer) npt.assert_allclose(model.model.optimizer.lr.numpy(), init_lr, rtol=1e-6) @@ -276,7 +276,7 @@ def test_deep_ensemble_with_lr_scheduler() -> None: lr_schedule = tf_keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=init_lr, decay_steps=1, decay_rate=0.5 ) - optimizer = KerasOptimizer(tf.optimizers.Adam(lr_schedule), fit_args) + optimizer = KerasOptimizer(tf_keras.optimizers.Adam(lr_schedule), fit_args) model = DeepEnsemble(keras_ensemble, optimizer) model.optimize(example_data) @@ -419,7 +419,7 @@ def test_deep_ensemble_optimize(ensemble_size: int, bootstrap_data: bool, epochs keras_ensemble = trieste_keras_ensemble_model(example_data, ensemble_size, False) - custom_optimizer = tf.optimizers.RMSprop() + custom_optimizer = tf_keras.optimizers.RMSprop() custom_fit_args = { "verbose": 0, "epochs": epochs, @@ -444,7 +444,7 @@ def test_deep_ensemble_loss(bootstrap_data: bool) -> None: example_data = _get_example_data([100, 1]) loss = negative_log_likelihood - optimizer = tf.optimizers.Adam() + optimizer = tf_keras.optimizers.Adam() model = DeepEnsemble( trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False), @@ -470,7 +470,7 @@ def test_deep_ensemble_predict_ensemble() -> None: example_data = _get_example_data([100, 1]) loss = negative_log_likelihood - optimizer = tf.optimizers.Adam() + optimizer = tf_keras.optimizers.Adam() model = DeepEnsemble( trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False), diff --git a/tests/util/models/gpflux/models.py b/tests/util/models/gpflux/models.py index 412ed21972..d971d6d925 100644 --- a/tests/util/models/gpflux/models.py +++ b/tests/util/models/gpflux/models.py @@ -153,7 +153,7 @@ def scheduler(epoch: int, lr: float) -> float: "verbose": 0, "callbacks": tf_keras.callbacks.LearningRateScheduler(scheduler), } - optimizer = KerasOptimizer(tf.optimizers.Adam(learning_rate), fit_args) + optimizer = KerasOptimizer(tf_keras.optimizers.Adam(learning_rate), fit_args) model = DeepGaussianProcess(dgp, optimizer) diff --git a/trieste/models/gpflow/models.py b/trieste/models/gpflow/models.py index 235325e399..d30d0243cd 100644 --- a/trieste/models/gpflow/models.py +++ b/trieste/models/gpflow/models.py @@ -25,6 +25,7 @@ SeparateIndependentInducingVariables, SharedIndependentInducingVariables, ) +from gpflow.keras import tf_keras from gpflow.logdensities import multivariate_normal from gpflow.models import GPR, SGPR, SVGP, VGP from gpflow.models.vgp import update_vgp_data @@ -881,7 +882,7 @@ def __init__( ) if optimizer is None: - optimizer = 
BatchOptimizer(tf.optimizers.Adam(), batch_size=100, compile=True) + optimizer = BatchOptimizer(tf_keras.optimizers.Adam(), batch_size=100, compile=True) super().__init__(optimizer) self._model = model @@ -1156,14 +1157,14 @@ def __init__( if optimizer is None and not use_natgrads: optimizer = Optimizer(gpflow.optimizers.Scipy(), compile=True) elif optimizer is None and use_natgrads: - optimizer = BatchOptimizer(tf.optimizers.Adam(), batch_size=100, compile=True) + optimizer = BatchOptimizer(tf_keras.optimizers.Adam(), batch_size=100, compile=True) super().__init__(optimizer) check_optimizer(self.optimizer) if use_natgrads: - if not isinstance(self.optimizer.optimizer, tf.optimizers.Optimizer): + if not isinstance(self.optimizer.optimizer, tf_keras.optimizers.Optimizer): raise ValueError( f""" Natgrads can only be used with a BatchOptimizer wrapper using an instance of @@ -1172,7 +1173,7 @@ def __init__( ) natgrad_gamma = 0.1 if natgrad_gamma is None else natgrad_gamma else: - if isinstance(self.optimizer.optimizer, tf.optimizers.Optimizer): + if isinstance(self.optimizer.optimizer, tf_keras.optimizers.Optimizer): raise ValueError( f""" If not using natgrads an Optimizer wrapper should be used with diff --git a/trieste/models/gpflow/utils.py b/trieste/models/gpflow/utils.py index fc6534b067..38476ad617 100644 --- a/trieste/models/gpflow/utils.py +++ b/trieste/models/gpflow/utils.py @@ -19,6 +19,7 @@ import gpflow import tensorflow as tf import tensorflow_probability as tfp +from gpflow.keras import tf_keras from ...data import Dataset from ...types import TensorType @@ -148,7 +149,7 @@ def check_optimizer(optimizer: Union[BatchOptimizer, Optimizer]) -> None: """ ) - if isinstance(optimizer.optimizer, tf.optimizers.Optimizer): + if isinstance(optimizer.optimizer, tf_keras.optimizers.Optimizer): if not isinstance(optimizer, BatchOptimizer): raise ValueError( f""" diff --git a/trieste/models/gpflux/interface.py b/trieste/models/gpflux/interface.py index e471dca763..16fd6fe440 100644 --- a/trieste/models/gpflux/interface.py +++ b/trieste/models/gpflux/interface.py @@ -16,7 +16,6 @@ from abc import ABC, abstractmethod -import tensorflow as tf from check_shapes import inherit_check_shapes from gpflow.base import Module from gpflow.keras import tf_keras @@ -41,7 +40,7 @@ def __init__(self, optimizer: KerasOptimizer | None = None): :class:`~tf.optimizers.Adam` optimizer with 0.01 learning rate. """ if optimizer is None: - optimizer = KerasOptimizer(tf.optimizers.Adam(0.01)) + optimizer = KerasOptimizer(tf_keras.optimizers.Adam(0.01)) self._optimizer = optimizer diff --git a/trieste/models/gpflux/models.py b/trieste/models/gpflux/models.py index 3d01473d8c..723bdc46ff 100644 --- a/trieste/models/gpflux/models.py +++ b/trieste/models/gpflux/models.py @@ -122,7 +122,7 @@ def __init__( ) self._num_rff_features = num_rff_features - if not isinstance(self.optimizer.optimizer, tf.optimizers.Optimizer): + if not isinstance(self.optimizer.optimizer, tf_keras.optimizers.Optimizer): raise ValueError( f"Optimizer for `DeepGaussianProcess` must be an instance of a " f"`tf.optimizers.Optimizer` or `tf.keras.optimizers.Optimizer`, " diff --git a/trieste/models/keras/interface.py b/trieste/models/keras/interface.py index fa48b0db1a..91928d3eef 100644 --- a/trieste/models/keras/interface.py +++ b/trieste/models/keras/interface.py @@ -43,10 +43,10 @@ def __init__(self, optimizer: Optional[KerasOptimizer] = None): :raise ValueError: If the optimizer is not an instance of :class:`~tf.optimizers.Optimizer`. 
""" if optimizer is None: - optimizer = KerasOptimizer(tf.optimizers.Adam()) + optimizer = KerasOptimizer(tf_keras.optimizers.Adam()) self._optimizer = optimizer - if not isinstance(optimizer.optimizer, tf.optimizers.Optimizer): + if not isinstance(optimizer.optimizer, tf_keras.optimizers.Optimizer): raise ValueError( f"Optimizer for `KerasPredictor` models must be an instance of a " f"`tf.optimizers.Optimizer`, received {type(optimizer.optimizer)} instead." From 1afb1746e9efafec4f2883a4c7fae08c39deee1e Mon Sep 17 00:00:00 2001 From: Uri Granta Date: Sat, 22 Jun 2024 19:32:43 +0100 Subject: [PATCH 04/20] Reformat --- docs/notebooks/deep_ensembles.pct.py | 2 +- .../integration/test_bayesian_optimization.py | 24 ++++--- tests/unit/acquisition/test_combination.py | 4 +- tests/unit/acquisition/test_rule.py | 17 ++--- tests/unit/models/test_interfaces.py | 10 ++- tests/util/misc.py | 6 +- trieste/acquisition/function/entropy.py | 18 ++--- trieste/acquisition/function/greedy_batch.py | 16 +++-- .../acquisition/function/multi_objective.py | 32 +++++---- trieste/acquisition/optimizer.py | 8 ++- trieste/acquisition/rule.py | 45 +++++------- trieste/ask_tell_optimization.py | 72 ++++++++++--------- trieste/bayesian_optimizer.py | 50 ++++++------- trieste/objectives/utils.py | 6 +- trieste/space.py | 9 +-- 15 files changed, 149 insertions(+), 170 deletions(-) diff --git a/docs/notebooks/deep_ensembles.pct.py b/docs/notebooks/deep_ensembles.pct.py index 5c57b602c6..a3814a9eb9 100644 --- a/docs/notebooks/deep_ensembles.pct.py +++ b/docs/notebooks/deep_ensembles.pct.py @@ -72,7 +72,7 @@ def objective(x, error=True): # %% -from gpflow import tf_keras +from gpflow.keras import tf_keras from trieste.models.keras import ( DeepEnsemble, KerasPredictor, diff --git a/tests/integration/test_bayesian_optimization.py b/tests/integration/test_bayesian_optimization.py index a5efcb4651..12b19b9804 100644 --- a/tests/integration/test_bayesian_optimization.py +++ b/tests/integration/test_bayesian_optimization.py @@ -303,8 +303,10 @@ def GPR_OPTIMIZER_PARAMS() -> Tuple[str, List[ParameterSet]]: @pytest.mark.parametrize(*GPR_OPTIMIZER_PARAMS()) def test_bayesian_optimizer_with_gpr_finds_minima_of_scaled_branin( num_steps: int, - acquisition_rule: AcquisitionRuleType[GaussianProcessRegression] - | Tuple[AcquisitionRuleType[GaussianProcessRegression], int], + acquisition_rule: ( + AcquisitionRuleType[GaussianProcessRegression] + | Tuple[AcquisitionRuleType[GaussianProcessRegression], int] + ), ) -> None: _test_optimizer_finds_minimum( GaussianProcessRegression, @@ -318,8 +320,10 @@ def test_bayesian_optimizer_with_gpr_finds_minima_of_scaled_branin( @pytest.mark.parametrize(*GPR_OPTIMIZER_PARAMS()) def test_bayesian_optimizer_with_gpr_finds_minima_of_simple_quadratic( num_steps: int, - acquisition_rule: AcquisitionRuleType[GaussianProcessRegression] - | Tuple[AcquisitionRuleType[GaussianProcessRegression], int], + acquisition_rule: ( + AcquisitionRuleType[GaussianProcessRegression] + | Tuple[AcquisitionRuleType[GaussianProcessRegression], int] + ), ) -> None: # for speed reasons we sometimes test with a simple quadratic defined on the same search space # branin; currently assume that every rule should be able to solve this in 6 steps @@ -601,8 +605,10 @@ def test_bayesian_optimizer_with_PCTS_and_deep_ensemble_finds_minima_of_simple_q def _test_optimizer_finds_minimum( model_type: Type[TrainableProbabilisticModelType], num_steps: Optional[int], - acquisition_rule: AcquisitionRuleType[TrainableProbabilisticModelType] - | 
Tuple[AcquisitionRuleType[TrainableProbabilisticModelType], int], + acquisition_rule: ( + AcquisitionRuleType[TrainableProbabilisticModelType] + | Tuple[AcquisitionRuleType[TrainableProbabilisticModelType], int] + ), optimize_branin: bool = False, model_args: Optional[Mapping[str, Any]] = None, check_regret: bool = False, @@ -752,9 +758,9 @@ def patched_tf_cast(x: TensorType, dtype: tf.DType) -> TensorType: # check history saved ok assert len(result.history) <= (num_steps or 2) assert len(result.loaded_history) == len(result.history) - loaded_result: OptimizationResult[ - None, TrainableProbabilisticModel - ] = OptimizationResult.from_path(Path(tmpdirname) / "history") + loaded_result: OptimizationResult[None, TrainableProbabilisticModel] = ( + OptimizationResult.from_path(Path(tmpdirname) / "history") + ) assert loaded_result.final_result.is_ok assert len(loaded_result.history) == len(result.history) diff --git a/tests/unit/acquisition/test_combination.py b/tests/unit/acquisition/test_combination.py index 95415f6994..30cd9d6e73 100644 --- a/tests/unit/acquisition/test_combination.py +++ b/tests/unit/acquisition/test_combination.py @@ -126,9 +126,7 @@ def test_sum_and_product_for_single_builder( reducer_class: type[Sum[ProbabilisticModel] | Product[ProbabilisticModel]], ) -> None: data, models = {TAG: empty_dataset([1], [1])}, {TAG: QuadraticMeanAndRBFKernel()} - acq = reducer_class(_Static(lambda x: x**2)).prepare_acquisition_function( - models, datasets=data - ) + acq = reducer_class(_Static(lambda x: x**2)).prepare_acquisition_function(models, datasets=data) xs = tf.random.uniform([3, 5, 1], minval=-1.0) npt.assert_allclose(acq(xs), xs**2) diff --git a/tests/unit/acquisition/test_rule.py b/tests/unit/acquisition/test_rule.py index 57d8d73b8c..8040bb493d 100644 --- a/tests/unit/acquisition/test_rule.py +++ b/tests/unit/acquisition/test_rule.py @@ -359,9 +359,10 @@ def test_joint_batch_acquisition_rule_acquire( search_space = Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3])) num_query_points = 4 acq = _JointBatchModelMinusMeanMaximumSingleBuilder() - acq_rule: AcquisitionRule[TensorType, Box, ProbabilisticModel] | AcquisitionRule[ - State[TensorType, AsynchronousRuleState], Box, ProbabilisticModel - ] = rule_fn(acq, num_query_points) + acq_rule: ( + AcquisitionRule[TensorType, Box, ProbabilisticModel] + | AcquisitionRule[State[TensorType, AsynchronousRuleState], Box, ProbabilisticModel] + ) = rule_fn(acq, num_query_points) dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1])) points_or_stateful = acq_rule.acquire_single( @@ -431,9 +432,10 @@ def test_greedy_batch_acquisition_rule_acquire( num_query_points = 4 acq = _GreedyBatchModelMinusMeanMaximumSingleBuilder() assert acq._update_count == 0 - acq_rule: AcquisitionRule[TensorType, Box, ProbabilisticModel] | AcquisitionRule[ - State[TensorType, AsynchronousRuleState], Box, ProbabilisticModel - ] = rule_fn(acq, num_query_points) + acq_rule: ( + AcquisitionRule[TensorType, Box, ProbabilisticModel] + | AcquisitionRule[State[TensorType, AsynchronousRuleState], Box, ProbabilisticModel] + ) = rule_fn(acq, num_query_points) dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1])) points_or_stateful = acq_rule.acquire_single( search_space, QuadraticMeanAndRBFKernel(), dataset=dataset @@ -1691,8 +1693,7 @@ def location(self) -> TensorType: return self._location @location.setter - def location(self, location: TensorType) -> None: - ... + def location(self, location: TensorType) -> None: ... 
def _init_eps(self) -> None: self.eps = tf.constant(self._init_eps_val, dtype=tf.float64) diff --git a/tests/unit/models/test_interfaces.py b/tests/unit/models/test_interfaces.py index 6213f7e723..49ff7d0eba 100644 --- a/tests/unit/models/test_interfaces.py +++ b/tests/unit/models/test_interfaces.py @@ -60,12 +60,10 @@ def __init__( ) -def _model_stack() -> ( - tuple[ - TrainablePredictJointReparamModelStack, - tuple[TrainableSupportsPredictJointHasReparamSampler, ...], - ] -): +def _model_stack() -> tuple[ + TrainablePredictJointReparamModelStack, + tuple[TrainableSupportsPredictJointHasReparamSampler, ...], +]: model01 = _QuadraticModel([0.0, 0.5], [1.0, 0.3]) model2 = _QuadraticModel([2.0], [2.0]) model3 = _QuadraticModel([-1.0], [0.1]) diff --git a/tests/util/misc.py b/tests/util/misc.py index 59b2f2327e..13c3a0b8e3 100644 --- a/tests/util/misc.py +++ b/tests/util/misc.py @@ -55,13 +55,11 @@ @overload -def random_seed(f_py: C, seed: int = 0) -> C: - ... +def random_seed(f_py: C, seed: int = 0) -> C: ... @overload -def random_seed(f_py: None = None, seed: int = 0) -> Callable[[C], C]: - ... +def random_seed(f_py: None = None, seed: int = 0) -> Callable[[C], C]: ... def random_seed(f_py: Optional[C] = None, seed: int = 0) -> Callable[[C], C] | C: diff --git a/trieste/acquisition/function/entropy.py b/trieste/acquisition/function/entropy.py index c88a02b5e3..d53cb6a61b 100644 --- a/trieste/acquisition/function/entropy.py +++ b/trieste/acquisition/function/entropy.py @@ -67,8 +67,7 @@ def __init__( num_samples: int = 5, grid_size: int = 1000, min_value_sampler: None = None, - ): - ... + ): ... @overload def __init__( @@ -77,8 +76,7 @@ def __init__( num_samples: int = 5, grid_size: int = 1000, min_value_sampler: Optional[ThompsonSampler[ProbabilisticModelType]] = None, - ): - ... + ): ... def __init__( self, @@ -258,8 +256,7 @@ def __init__( grid_size: int = 1000, min_value_sampler: None = None, rescaled_repulsion: bool = True, - ): - ... + ): ... @overload def __init__( @@ -269,8 +266,7 @@ def __init__( grid_size: int = 1000, min_value_sampler: Optional[ThompsonSampler[GIBBONModelType]] = None, rescaled_repulsion: bool = True, - ): - ... + ): ... def __init__( self, @@ -656,8 +652,7 @@ def __init__( num_samples: int = 5, grid_size: int = 1000, min_value_sampler: None = None, - ): - ... + ): ... @overload def __init__( @@ -666,8 +661,7 @@ def __init__( num_samples: int = 5, grid_size: int = 1000, min_value_sampler: Optional[ThompsonSampler[MUMBOModelType]] = None, - ): - ... + ): ... def __init__( self, diff --git a/trieste/acquisition/function/greedy_batch.py b/trieste/acquisition/function/greedy_batch.py index 9637f38152..3de6050c85 100644 --- a/trieste/acquisition/function/greedy_batch.py +++ b/trieste/acquisition/function/greedy_batch.py @@ -83,10 +83,12 @@ def __init__( Union[PenalizationFunction, UpdatablePenalizationFunction], ] ] = None, - base_acquisition_function_builder: ExpectedImprovement - | MinValueEntropySearch[ProbabilisticModel] - | MakePositive[ProbabilisticModel] - | None = None, + base_acquisition_function_builder: ( + ExpectedImprovement + | MinValueEntropySearch[ProbabilisticModel] + | MakePositive[ProbabilisticModel] + | None + ) = None, ): """ :param search_space: The global search space over which the optimisation is defined. 
@@ -108,9 +110,9 @@ def __init__( self._lipschitz_penalizer = soft_local_penalizer if penalizer is None else penalizer if base_acquisition_function_builder is None: - self._base_builder: SingleModelAcquisitionBuilder[ - ProbabilisticModel - ] = ExpectedImprovement() + self._base_builder: SingleModelAcquisitionBuilder[ProbabilisticModel] = ( + ExpectedImprovement() + ) else: self._base_builder = base_acquisition_function_builder diff --git a/trieste/acquisition/function/multi_objective.py b/trieste/acquisition/function/multi_objective.py index af12adaa7c..d72c00eae1 100644 --- a/trieste/acquisition/function/multi_objective.py +++ b/trieste/acquisition/function/multi_objective.py @@ -55,9 +55,9 @@ class ExpectedHypervolumeImprovement(SingleModelAcquisitionBuilder[Probabilistic def __init__( self, - reference_point_spec: Sequence[float] - | TensorType - | Callable[..., TensorType] = get_reference_point, + reference_point_spec: ( + Sequence[float] | TensorType | Callable[..., TensorType] + ) = get_reference_point, ): """ :param reference_point_spec: this method is used to determine how the reference point is @@ -262,9 +262,9 @@ class BatchMonteCarloExpectedHypervolumeImprovement( def __init__( self, sample_size: int, - reference_point_spec: Sequence[float] - | TensorType - | Callable[..., TensorType] = get_reference_point, + reference_point_spec: ( + Sequence[float] | TensorType | Callable[..., TensorType] + ) = get_reference_point, *, jitter: float = DEFAULTS.JITTER, ): @@ -426,9 +426,9 @@ def __init__( objective_tag: Tag, constraint_builder: AcquisitionFunctionBuilder[ProbabilisticModelType], min_feasibility_probability: float | TensorType = 0.5, - reference_point_spec: Sequence[float] - | TensorType - | Callable[..., TensorType] = get_reference_point, + reference_point_spec: ( + Sequence[float] | TensorType | Callable[..., TensorType] + ) = get_reference_point, ): """ :param objective_tag: The tag for the objective data and model. @@ -520,9 +520,11 @@ class HIPPO(GreedyAcquisitionFunctionBuilder[ProbabilisticModelType]): def __init__( self, objective_tag: Tag = OBJECTIVE, - base_acquisition_function_builder: AcquisitionFunctionBuilder[ProbabilisticModelType] - | SingleModelAcquisitionBuilder[ProbabilisticModelType] - | None = None, + base_acquisition_function_builder: ( + AcquisitionFunctionBuilder[ProbabilisticModelType] + | SingleModelAcquisitionBuilder[ProbabilisticModelType] + | None + ) = None, ): """ Initializes the HIPPO acquisition function builder. @@ -534,9 +536,9 @@ def __init__( """ self._objective_tag = objective_tag if base_acquisition_function_builder is None: - self._base_builder: AcquisitionFunctionBuilder[ - ProbabilisticModelType - ] = ExpectedHypervolumeImprovement().using(self._objective_tag) + self._base_builder: AcquisitionFunctionBuilder[ProbabilisticModelType] = ( + ExpectedHypervolumeImprovement().using(self._objective_tag) + ) else: if isinstance(base_acquisition_function_builder, SingleModelAcquisitionBuilder): self._base_builder = base_acquisition_function_builder.using(self._objective_tag) diff --git a/trieste/acquisition/optimizer.py b/trieste/acquisition/optimizer.py index 01c33ece7e..5b91a48c69 100644 --- a/trieste/acquisition/optimizer.py +++ b/trieste/acquisition/optimizer.py @@ -847,9 +847,11 @@ def get_bounds_of_optimization(space: SearchSpace, starting_points: TensorType) # Otherwise, we use the original bounds. 
bounds = [ [ - get_bounds_of_box_relaxation_around_point(ss, starting_points[i : i + 1, j]) - if isinstance(ss, TaggedProductSearchSpace) - else spo.Bounds(ss.lower, ss.upper) + ( + get_bounds_of_box_relaxation_around_point(ss, starting_points[i : i + 1, j]) + if isinstance(ss, TaggedProductSearchSpace) + else spo.Bounds(ss.lower, ss.upper) + ) for j, ss in enumerate(subspaces) ] for i in tf.range(num_optimization_runs_per_function) diff --git a/trieste/acquisition/rule.py b/trieste/acquisition/rule.py index 4a5446560a..e1aff5e068 100644 --- a/trieste/acquisition/rule.py +++ b/trieste/acquisition/rule.py @@ -216,8 +216,7 @@ def __init__( optimizer: AcquisitionOptimizer[SearchSpaceType] | None = None, num_query_points: int = 1, initial_acquisition_function: Optional[AcquisitionFunction] = None, - ): - ... + ): ... @overload def __init__( @@ -231,8 +230,7 @@ def __init__( optimizer: AcquisitionOptimizer[SearchSpaceType] | None = None, num_query_points: int = 1, initial_acquisition_function: Optional[AcquisitionFunction] = None, - ): - ... + ): ... def __init__( self, @@ -524,8 +522,7 @@ def __init__( builder: None = None, optimizer: AcquisitionOptimizer[SearchSpaceType] | None = None, num_query_points: int = 1, - ): - ... + ): ... @overload def __init__( @@ -536,8 +533,7 @@ def __init__( ), optimizer: AcquisitionOptimizer[SearchSpaceType] | None = None, num_query_points: int = 1, - ): - ... + ): ... def __init__( self, @@ -697,8 +693,10 @@ class AsynchronousGreedy( def __init__( self, - builder: GreedyAcquisitionFunctionBuilder[ProbabilisticModelType] - | SingleModelGreedyAcquisitionBuilder[ProbabilisticModelType], + builder: ( + GreedyAcquisitionFunctionBuilder[ProbabilisticModelType] + | SingleModelGreedyAcquisitionBuilder[ProbabilisticModelType] + ), optimizer: AcquisitionOptimizer[SearchSpaceType] | None = None, num_query_points: int = 1, ): @@ -897,8 +895,7 @@ def __init__( num_query_points: int, thompson_sampler: None = None, select_output: Callable[[TensorType], TensorType] = select_nth_output, - ): - ... + ): ... @overload def __init__( @@ -907,8 +904,7 @@ def __init__( num_query_points: int, thompson_sampler: Optional[ThompsonSampler[ProbabilisticModelType]] = None, select_output: Callable[[TensorType], TensorType] = select_nth_output, - ): - ... + ): ... def __init__( self, @@ -1116,16 +1112,13 @@ def _get_tags(self, tags: Set[Tag]) -> Tuple[Set[Tag], Set[Tag]]: return local_gtags, global_tags @overload - def with_input_active_dims(self, value: TensorType) -> TensorType: - ... + def with_input_active_dims(self, value: TensorType) -> TensorType: ... @overload - def with_input_active_dims(self, value: Dataset) -> Dataset: - ... + def with_input_active_dims(self, value: Dataset) -> Dataset: ... @overload - def with_input_active_dims(self, value: ProbabilisticModel) -> ProbabilisticModel: - ... + def with_input_active_dims(self, value: ProbabilisticModel) -> ProbabilisticModel: ... def with_input_active_dims( self, value: Union[TensorType, Dataset, ProbabilisticModel] @@ -1166,22 +1159,18 @@ def with_input_active_dims( return selected_input @overload - def select_in_region(self, mapping: None) -> None: - ... + def select_in_region(self, mapping: None) -> None: ... @overload - def select_in_region(self, mapping: Mapping[Tag, TensorType]) -> Mapping[Tag, TensorType]: - ... + def select_in_region(self, mapping: Mapping[Tag, TensorType]) -> Mapping[Tag, TensorType]: ... @overload - def select_in_region(self, mapping: Mapping[Tag, Dataset]) -> Mapping[Tag, Dataset]: - ... 
+ def select_in_region(self, mapping: Mapping[Tag, Dataset]) -> Mapping[Tag, Dataset]: ... @overload def select_in_region( self, mapping: Mapping[Tag, ProbabilisticModel] - ) -> Mapping[Tag, ProbabilisticModel]: - ... + ) -> Mapping[Tag, ProbabilisticModel]: ... def select_in_region( self, mapping: Optional[Mapping[Tag, Union[TensorType, Dataset, ProbabilisticModel]]] diff --git a/trieste/ask_tell_optimization.py b/trieste/ask_tell_optimization.py index e5e4579c0a..f06a3c8108 100644 --- a/trieste/ask_tell_optimization.py +++ b/trieste/ask_tell_optimization.py @@ -110,8 +110,7 @@ def __init__( track_data: bool = True, local_data_ixs: Optional[Sequence[TensorType]] = None, local_data_len: Optional[int] = None, - ): - ... + ): ... @overload def __init__( @@ -125,8 +124,7 @@ def __init__( track_data: bool = True, local_data_ixs: Optional[Sequence[TensorType]] = None, local_data_len: Optional[int] = None, - ): - ... + ): ... @overload def __init__( @@ -143,8 +141,7 @@ def __init__( track_data: bool = True, local_data_ixs: Optional[Sequence[TensorType]] = None, local_data_len: Optional[int] = None, - ): - ... + ): ... @overload def __init__( @@ -157,8 +154,7 @@ def __init__( track_data: bool = True, local_data_ixs: Optional[Sequence[TensorType]] = None, local_data_len: Optional[int] = None, - ): - ... + ): ... @overload def __init__( @@ -172,8 +168,7 @@ def __init__( track_data: bool = True, local_data_ixs: Optional[Sequence[TensorType]] = None, local_data_len: Optional[int] = None, - ): - ... + ): ... @overload def __init__( @@ -190,20 +185,21 @@ def __init__( track_data: bool = True, local_data_ixs: Optional[Sequence[TensorType]] = None, local_data_len: Optional[int] = None, - ): - ... + ): ... def __init__( self, search_space: SearchSpaceType, datasets: Mapping[Tag, Dataset] | Dataset, models: Mapping[Tag, ProbabilisticModelType] | ProbabilisticModelType, - acquisition_rule: AcquisitionRule[ - TensorType | State[StateType | None, TensorType], - SearchSpaceType, - ProbabilisticModelType, - ] - | None = None, + acquisition_rule: ( + AcquisitionRule[ + TensorType | State[StateType | None, TensorType], + SearchSpaceType, + ProbabilisticModelType, + ] + | None + ) = None, acquisition_state: StateType | None = None, *, fit_model: bool = True, @@ -322,9 +318,9 @@ def __init__( ) self._acquisition_rule.initialize_subspaces(search_space) - filtered_datasets: Mapping[Tag, Dataset] | State[ - StateType | None, Mapping[Tag, Dataset] - ] = self._acquisition_rule.filter_datasets(self._models, datasets) + filtered_datasets: ( + Mapping[Tag, Dataset] | State[StateType | None, Mapping[Tag, Dataset]] + ) = self._acquisition_rule.filter_datasets(self._models, datasets) if callable(filtered_datasets): self._acquisition_state, self._filtered_datasets = filtered_datasets( self._acquisition_state @@ -451,15 +447,19 @@ def dataset_len(cls, datasets: Mapping[Tag, Dataset]) -> int: @classmethod def from_record( cls: Type[AskTellOptimizerType], - record: Record[StateType, ProbabilisticModelType] - | FrozenRecord[StateType, ProbabilisticModelType], + record: ( + Record[StateType, ProbabilisticModelType] + | FrozenRecord[StateType, ProbabilisticModelType] + ), search_space: SearchSpaceType, - acquisition_rule: AcquisitionRule[ - TensorType | State[StateType | None, TensorType], - SearchSpaceType, - ProbabilisticModelType, - ] - | None = None, + acquisition_rule: ( + AcquisitionRule[ + TensorType | State[StateType | None, TensorType], + SearchSpaceType, + ProbabilisticModelType, + ] + | None + ) = None, track_data: bool = 
True, local_data_ixs: Optional[Sequence[TensorType]] = None, local_data_len: Optional[int] = None, @@ -540,12 +540,14 @@ def from_state( cls: Type[AskTellOptimizerType], state: AskTellOptimizerState[StateType, ProbabilisticModelType], search_space: SearchSpaceType, - acquisition_rule: AcquisitionRule[ - TensorType | State[StateType | None, TensorType], - SearchSpaceType, - ProbabilisticModelType, - ] - | None = None, + acquisition_rule: ( + AcquisitionRule[ + TensorType | State[StateType | None, TensorType], + SearchSpaceType, + ProbabilisticModelType, + ] + | None + ) = None, track_data: bool = True, ) -> AskTellOptimizerType: """Creates new :class:`~AskTellOptimizer` instance from provided AskTellOptimizer state. diff --git a/trieste/bayesian_optimizer.py b/trieste/bayesian_optimizer.py index 1f06d9c458..0a11913b9f 100644 --- a/trieste/bayesian_optimizer.py +++ b/trieste/bayesian_optimizer.py @@ -381,8 +381,7 @@ def optimize( EarlyStopCallback[TrainableProbabilisticModel, object] ] = None, start_step: int = 0, - ) -> OptimizationResult[None, TrainableProbabilisticModel]: - ... + ) -> OptimizationResult[None, TrainableProbabilisticModel]: ... @overload def optimize( @@ -405,8 +404,7 @@ def optimize( # this should really be OptimizationResult[None], but tf.Tensor is untyped so the type # checker can't differentiate between TensorType and State[S | None, TensorType], and # the return types clash. object is close enough to None that object will do. - ) -> OptimizationResult[object, TrainableProbabilisticModelType]: - ... + ) -> OptimizationResult[object, TrainableProbabilisticModelType]: ... @overload def optimize( @@ -426,8 +424,7 @@ def optimize( EarlyStopCallback[TrainableProbabilisticModelType, object] ] = None, start_step: int = 0, - ) -> OptimizationResult[object, TrainableProbabilisticModelType]: - ... + ) -> OptimizationResult[object, TrainableProbabilisticModelType]: ... @overload def optimize( @@ -448,8 +445,7 @@ def optimize( EarlyStopCallback[TrainableProbabilisticModelType, StateType] ] = None, start_step: int = 0, - ) -> OptimizationResult[StateType, TrainableProbabilisticModelType]: - ... + ) -> OptimizationResult[StateType, TrainableProbabilisticModelType]: ... @overload def optimize( @@ -470,8 +466,7 @@ def optimize( EarlyStopCallback[TrainableProbabilisticModelType, StateType] ] = None, start_step: int = 0, - ) -> OptimizationResult[StateType, TrainableProbabilisticModelType]: - ... + ) -> OptimizationResult[StateType, TrainableProbabilisticModelType]: ... @overload def optimize( @@ -488,8 +483,7 @@ def optimize( EarlyStopCallback[TrainableProbabilisticModel, object] ] = None, start_step: int = 0, - ) -> OptimizationResult[None, TrainableProbabilisticModel]: - ... + ) -> OptimizationResult[None, TrainableProbabilisticModel]: ... @overload def optimize( @@ -509,8 +503,7 @@ def optimize( EarlyStopCallback[TrainableProbabilisticModelType, object] ] = None, start_step: int = 0, - ) -> OptimizationResult[object, TrainableProbabilisticModelType]: - ... + ) -> OptimizationResult[object, TrainableProbabilisticModelType]: ... @overload def optimize( @@ -530,8 +523,7 @@ def optimize( EarlyStopCallback[TrainableProbabilisticModelType, object] ] = None, start_step: int = 0, - ) -> OptimizationResult[object, TrainableProbabilisticModelType]: - ... + ) -> OptimizationResult[object, TrainableProbabilisticModelType]: ... 
@overload def optimize( @@ -552,8 +544,7 @@ def optimize( EarlyStopCallback[TrainableProbabilisticModelType, StateType] ] = None, start_step: int = 0, - ) -> OptimizationResult[StateType, TrainableProbabilisticModelType]: - ... + ) -> OptimizationResult[StateType, TrainableProbabilisticModelType]: ... @overload def optimize( @@ -574,20 +565,21 @@ def optimize( EarlyStopCallback[TrainableProbabilisticModelType, StateType] ] = None, start_step: int = 0, - ) -> OptimizationResult[StateType, TrainableProbabilisticModelType]: - ... + ) -> OptimizationResult[StateType, TrainableProbabilisticModelType]: ... def optimize( self, num_steps: int, datasets: Mapping[Tag, Dataset] | Dataset, models: Mapping[Tag, TrainableProbabilisticModelType] | TrainableProbabilisticModelType, - acquisition_rule: AcquisitionRule[ - TensorType | State[StateType | None, TensorType], - SearchSpaceType, - TrainableProbabilisticModelType, - ] - | None = None, + acquisition_rule: ( + AcquisitionRule[ + TensorType | State[StateType | None, TensorType], + SearchSpaceType, + TrainableProbabilisticModelType, + ] + | None + ) = None, acquisition_state: StateType | None = None, *, track_state: bool = True, @@ -769,9 +761,9 @@ def optimize( ) acquisition_rule.initialize_subspaces(self._search_space) - filtered_datasets_or_callable: Mapping[Tag, Dataset] | State[ - StateType | None, Mapping[Tag, Dataset] - ] = acquisition_rule.filter_datasets(models, datasets) + filtered_datasets_or_callable: ( + Mapping[Tag, Dataset] | State[StateType | None, Mapping[Tag, Dataset]] + ) = acquisition_rule.filter_datasets(models, datasets) if callable(filtered_datasets_or_callable): acquisition_state, filtered_datasets = filtered_datasets_or_callable( acquisition_state diff --git a/trieste/objectives/utils.py b/trieste/objectives/utils.py index 088391571e..902ef7e27d 100644 --- a/trieste/objectives/utils.py +++ b/trieste/objectives/utils.py @@ -31,13 +31,11 @@ @overload -def mk_observer(objective: Callable[[TensorType], TensorType]) -> SingleObserver: - ... +def mk_observer(objective: Callable[[TensorType], TensorType]) -> SingleObserver: ... @overload -def mk_observer(objective: Callable[[TensorType], TensorType], key: Tag) -> MultiObserver: - ... +def mk_observer(objective: Callable[[TensorType], TensorType], key: Tag) -> MultiObserver: ... def mk_observer( diff --git a/trieste/space.py b/trieste/space.py index 1c361dc6ef..dc5552c8c8 100644 --- a/trieste/space.py +++ b/trieste/space.py @@ -279,8 +279,7 @@ def product(self: SearchSpaceType, other: SearchSpaceType) -> SearchSpaceType: """ @overload - def __mul__(self: SearchSpaceType, other: SearchSpaceType) -> SearchSpaceType: - ... + def __mul__(self: SearchSpaceType, other: SearchSpaceType) -> SearchSpaceType: ... @overload def __mul__(self: SearchSpaceType, other: SearchSpace) -> SearchSpace: # type: ignore[misc] @@ -494,8 +493,7 @@ def __init__( upper: Sequence[float], constraints: Optional[Sequence[Constraint]] = None, ctol: float | TensorType = 1e-7, - ): - ... + ): ... @overload def __init__( @@ -504,8 +502,7 @@ def __init__( upper: TensorType, constraints: Optional[Sequence[Constraint]] = None, ctol: float | TensorType = 1e-7, - ): - ... + ): ... 
def __init__( self, From 6b00e3dc4f8f7d25d17f71747f99ebf70c255f12 Mon Sep 17 00:00:00 2001 From: Uri Granta Date: Sat, 22 Jun 2024 19:37:33 +0100 Subject: [PATCH 05/20] One more tensorflow.keras import --- .../models/multifidelity/test_multifidelity_models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/models/multifidelity/test_multifidelity_models.py b/tests/integration/models/multifidelity/test_multifidelity_models.py index 507b7ada4b..030a4d823e 100644 --- a/tests/integration/models/multifidelity/test_multifidelity_models.py +++ b/tests/integration/models/multifidelity/test_multifidelity_models.py @@ -2,7 +2,7 @@ import numpy as np import numpy.testing as npt import tensorflow as tf -from tensorflow.keras.metrics import mean_squared_error +from gpflow.keras import tf_keras import trieste from tests.util.misc import random_seed @@ -140,7 +140,7 @@ def test_multifidelity_nonlinear_autoregressive_results_better_than_linear() -> predictions = model.predict(test_xs_w_fid)[0] gt_obs = observer(test_xs_w_fid).observations - mses.append(tf.reduce_sum(mean_squared_error(gt_obs, predictions))) + mses.append(tf.reduce_sum(tf_keras.metrics.mean_squared_error(gt_obs, predictions))) assert mses[0] < mses[1] From 44903b29f0f69387f5a3954c8d2d615c07379f25 Mon Sep 17 00:00:00 2001 From: Uri Granta Date: Sun, 23 Jun 2024 17:51:23 +0100 Subject: [PATCH 06/20] Disable fail-fast --- .github/workflows/quality-checks.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/quality-checks.yaml b/.github/workflows/quality-checks.yaml index edaa52a94f..fe6e549c01 100644 --- a/.github/workflows/quality-checks.yaml +++ b/.github/workflows/quality-checks.yaml @@ -53,6 +53,7 @@ jobs: tests: runs-on: ubuntu-latest strategy: + fail-fast: false matrix: part: [ "1", "2", "3", "4" ] name: tests (part${{ matrix.part }}) @@ -67,6 +68,7 @@ jobs: tests_old: runs-on: ubuntu-latest strategy: + fail-fast: false matrix: part: [ "1", "2", "3", "4" ] name: tests_old (part${{ matrix.part }}) From b0437864408ea6091515095a553456c1463db9d3 Mon Sep 17 00:00:00 2001 From: Uri Granta Date: Mon, 24 Jun 2024 11:59:51 +0100 Subject: [PATCH 07/20] Try fixing types_old --- common_build/types_old/constraints.txt | 5 +++++ common_build/types_old/requirements.txt | 16 ++++++++++++++++ generate_constraints.sh | 1 + tox.ini | 2 +- 4 files changed, 23 insertions(+), 1 deletion(-) create mode 100644 common_build/types_old/constraints.txt create mode 100644 common_build/types_old/requirements.txt diff --git a/common_build/types_old/constraints.txt b/common_build/types_old/constraints.txt new file mode 100644 index 0000000000..118b6d0a31 --- /dev/null +++ b/common_build/types_old/constraints.txt @@ -0,0 +1,5 @@ +mypy==1.4.1 +mypy-extensions==1.0.0 +tomli==2.0.1 +types-PyYAML==6.0.12.20240311 +typing_extensions==4.12.2 diff --git a/common_build/types_old/requirements.txt b/common_build/types_old/requirements.txt new file mode 100644 index 0000000000..650234ad88 --- /dev/null +++ b/common_build/types_old/requirements.txt @@ -0,0 +1,16 @@ +# Copyright 2020 The Trieste Contributors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +mypy<1.5 # later versions don't support Python 3.7 +types-PyYAML diff --git a/generate_constraints.sh b/generate_constraints.sh index 803197d3d2..5e20a5f7d7 100755 --- a/generate_constraints.sh +++ b/generate_constraints.sh @@ -39,6 +39,7 @@ generate_for_env docs false generate_for_env common_build/format false generate_for_env common_build/taskipy false generate_for_env common_build/types false +generate_for_env common_build/types_old false generate_for_env notebooks true generate_for_env tests/old true generate_for_env tests/latest true diff --git a/tox.ini b/tox.ini index d7a753af12..e57065b0bd 100644 --- a/tox.ini +++ b/tox.ini @@ -94,7 +94,7 @@ description = tests_old_3: Run old tests part 3 tests_old_4: Run old tests part 4 commands = - types_old: pip install -r common_build/types/requirements.txt -c common_build/types/constraints.txt + types_old: pip install -r common_build/types_old/requirements.txt -c common_build/types_old/constraints.txt types_old: pip install . -r tests/old/requirements.txt -c tests/old/constraints.txt types_old: mypy {posargs} # unlike tests this doesn't include the optional qhsri support From e4dca43eeb9dbd2fd1a903aa01df9d0a0380fccb Mon Sep 17 00:00:00 2001 From: Uri Granta Date: Mon, 24 Jun 2024 12:21:44 +0100 Subject: [PATCH 08/20] Revert "Try fixing types_old" This reverts commit b0437864408ea6091515095a553456c1463db9d3. --- common_build/types_old/constraints.txt | 5 ----- common_build/types_old/requirements.txt | 16 ---------------- generate_constraints.sh | 1 - tox.ini | 2 +- 4 files changed, 1 insertion(+), 23 deletions(-) delete mode 100644 common_build/types_old/constraints.txt delete mode 100644 common_build/types_old/requirements.txt diff --git a/common_build/types_old/constraints.txt b/common_build/types_old/constraints.txt deleted file mode 100644 index 118b6d0a31..0000000000 --- a/common_build/types_old/constraints.txt +++ /dev/null @@ -1,5 +0,0 @@ -mypy==1.4.1 -mypy-extensions==1.0.0 -tomli==2.0.1 -types-PyYAML==6.0.12.20240311 -typing_extensions==4.12.2 diff --git a/common_build/types_old/requirements.txt b/common_build/types_old/requirements.txt deleted file mode 100644 index 650234ad88..0000000000 --- a/common_build/types_old/requirements.txt +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2020 The Trieste Contributors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -mypy<1.5 # later versions don't support Python 3.7 -types-PyYAML diff --git a/generate_constraints.sh b/generate_constraints.sh index 5e20a5f7d7..803197d3d2 100755 --- a/generate_constraints.sh +++ b/generate_constraints.sh @@ -39,7 +39,6 @@ generate_for_env docs false generate_for_env common_build/format false generate_for_env common_build/taskipy false generate_for_env common_build/types false -generate_for_env common_build/types_old false generate_for_env notebooks true generate_for_env tests/old true generate_for_env tests/latest true diff --git a/tox.ini b/tox.ini index e57065b0bd..d7a753af12 100644 --- a/tox.ini +++ b/tox.ini @@ -94,7 +94,7 @@ description = tests_old_3: Run old tests part 3 tests_old_4: Run old tests part 4 commands = - types_old: pip install -r common_build/types_old/requirements.txt -c common_build/types_old/constraints.txt + types_old: pip install -r common_build/types/requirements.txt -c common_build/types/constraints.txt types_old: pip install . -r tests/old/requirements.txt -c tests/old/constraints.txt types_old: mypy {posargs} # unlike tests this doesn't include the optional qhsri support From ceeb10a13e4752e2891d635e5694c9efc43fe9f7 Mon Sep 17 00:00:00 2001 From: Uri Granta Date: Mon, 24 Jun 2024 12:26:40 +0100 Subject: [PATCH 09/20] Try testing against Python 3.8 instead --- .github/workflows/develop-checks.yaml | 2 +- .github/workflows/quality-checks.yaml | 4 ++-- .github/workflows/release-checks.yaml | 6 +++--- README.md | 2 +- docs/index.rst | 2 +- setup.py | 4 ++-- tox.ini | 8 ++++---- 7 files changed, 14 insertions(+), 14 deletions(-) diff --git a/.github/workflows/develop-checks.yaml b/.github/workflows/develop-checks.yaml index 9ddda56c13..9ca05668fc 100644 --- a/.github/workflows/develop-checks.yaml +++ b/.github/workflows/develop-checks.yaml @@ -36,7 +36,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: - python-version: "3.7" + python-version: "3.8" - run: pip install tox - run: tox -e tests_old -- --runslow only diff --git a/.github/workflows/quality-checks.yaml b/.github/workflows/quality-checks.yaml index fe6e549c01..361ec161ca 100644 --- a/.github/workflows/quality-checks.yaml +++ b/.github/workflows/quality-checks.yaml @@ -36,7 +36,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: - python-version: "3.7" + python-version: "3.8" - run: pip install tox - run: tox -e types_old @@ -76,7 +76,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: - python-version: "3.7" + python-version: "3.8" - run: pip install tox - run: tox -e tests_old_${{ matrix.part }} diff --git a/.github/workflows/release-checks.yaml b/.github/workflows/release-checks.yaml index 7699138e89..6c968596af 100644 --- a/.github/workflows/release-checks.yaml +++ b/.github/workflows/release-checks.yaml @@ -36,7 +36,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: - python-version: "3.7" + python-version: "3.8" - run: pip install tox - run: tox -e types_old @@ -74,7 +74,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: - python-version: "3.7" + python-version: "3.8" - run: pip install tox - run: tox -e tests_old_${{ matrix.part }} @@ -94,7 +94,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: - python-version: "3.7" + python-version: "3.8" - run: pip install tox - run: tox -e tests_old -- --runslow only diff --git a/README.md b/README.md index 6ba2639b52..43655aaef3 100644 --- a/README.md +++ b/README.md @@ -68,7 +68,7 @@ 
query_point, observation, arg_min_idx = result.try_get_optimal_point() ## Installation -Trieste supports Python 3.7+ and TensorFlow 2.5+, and uses [semantic versioning](https://semver.org/). +Trieste supports Python 3.8+ and TensorFlow 2.5+, and uses [semantic versioning](https://semver.org/). ### For users diff --git a/docs/index.rst b/docs/index.rst index ac4e7d0f34..0051345ca1 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -44,7 +44,7 @@ To install Trieste, run $ pip install trieste -The library supports Python 3.7 onwards, and uses `semantic versioning `_. +The library supports Python 3.8 onwards, and uses `semantic versioning `_. Getting help ------------ diff --git a/setup.py b/setup.py index 9098c09e56..8f3ac40677 100644 --- a/setup.py +++ b/setup.py @@ -32,11 +32,11 @@ "trieste": ["py.typed", "VERSION"], }, classifiers=[ - "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", ], - python_requires="~=3.7", + python_requires="~=3.8", install_requires=[ "absl-py", "dill<0.3.6", diff --git a/tox.ini b/tox.ini index d7a753af12..f0c3f9a065 100644 --- a/tox.ini +++ b/tox.ini @@ -82,13 +82,13 @@ commands = quickdocs: pip install -r docs/requirements.txt -c docs/constraints.txt quickdocs: bash -c "cd docs; if (python notebooks/quickrun/quickrun.py && make html); then python notebooks/quickrun/quickrun.py --revert; else python notebooks/quickrun/quickrun.py --revert; exit 1; fi" -# additional tests using python 3.7 and older versions of tensorflow +# additional tests using python 3.8 and older versions of tensorflow [testenv:{types_old,tests_old,tests_old_1,tests_old_2,tests_old_3,tests_old_4}] -basepython = python3.7 +basepython = python3.8 description = - types_old: Check types [Python 3.7] - tests_old: Run tests [Python 3.7, no qhsri] + types_old: Check types [Python 3.8] + tests_old: Run tests [Python 3.8, no qhsri] tests_old_1: Run old tests part 1 tests_old_2: Run old tests part 2 tests_old_3: Run old tests part 3 From d07c84d4d37a674546e0700c8027deb5fb02caf3 Mon Sep 17 00:00:00 2001 From: Uri Granta Date: Tue, 25 Jun 2024 14:53:29 +0100 Subject: [PATCH 10/20] Try 3.9 --- .github/workflows/develop-checks.yaml | 2 +- .github/workflows/quality-checks.yaml | 4 ++-- .github/workflows/release-checks.yaml | 6 +++--- README.md | 2 +- docs/index.rst | 2 +- setup.py | 4 ++-- tests/old/constraints.txt | 9 ++++++--- tests/old/requirements.txt | 3 --- tox.ini | 8 ++++---- 9 files changed, 20 insertions(+), 20 deletions(-) diff --git a/.github/workflows/develop-checks.yaml b/.github/workflows/develop-checks.yaml index 9ca05668fc..48606df28e 100644 --- a/.github/workflows/develop-checks.yaml +++ b/.github/workflows/develop-checks.yaml @@ -36,7 +36,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: - python-version: "3.8" + python-version: "3.9" - run: pip install tox - run: tox -e tests_old -- --runslow only diff --git a/.github/workflows/quality-checks.yaml b/.github/workflows/quality-checks.yaml index 361ec161ca..c4b22bb67e 100644 --- a/.github/workflows/quality-checks.yaml +++ b/.github/workflows/quality-checks.yaml @@ -36,7 +36,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: - python-version: "3.8" + python-version: "3.9" - run: pip install tox - run: tox -e types_old @@ -76,7 +76,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: - python-version: "3.8" + python-version: "3.9" - run: pip 
From d07c84d4d37a674546e0700c8027deb5fb02caf3 Mon Sep 17 00:00:00 2001
From: Uri Granta
Date: Tue, 25 Jun 2024 14:53:29 +0100
Subject: [PATCH 10/20] Try 3.9

---
 .github/workflows/develop-checks.yaml | 2 +-
 .github/workflows/quality-checks.yaml | 4 ++--
 .github/workflows/release-checks.yaml | 6 +++---
 README.md                             | 2 +-
 docs/index.rst                        | 2 +-
 setup.py                              | 4 ++--
 tests/old/constraints.txt             | 9 ++++++---
 tests/old/requirements.txt            | 3 ---
 tox.ini                               | 8 ++++----
 9 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/.github/workflows/develop-checks.yaml b/.github/workflows/develop-checks.yaml
index 9ca05668fc..48606df28e 100644
--- a/.github/workflows/develop-checks.yaml
+++ b/.github/workflows/develop-checks.yaml
@@ -36,7 +36,7 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
         with:
-          python-version: "3.8"
+          python-version: "3.9"
       - run: pip install tox
       - run: tox -e tests_old -- --runslow only
diff --git a/.github/workflows/quality-checks.yaml b/.github/workflows/quality-checks.yaml
index 361ec161ca..c4b22bb67e 100644
--- a/.github/workflows/quality-checks.yaml
+++ b/.github/workflows/quality-checks.yaml
@@ -36,7 +36,7 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
         with:
-          python-version: "3.8"
+          python-version: "3.9"
       - run: pip install tox
       - run: tox -e types_old
@@ -76,7 +76,7 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
         with:
-          python-version: "3.8"
+          python-version: "3.9"
       - run: pip install tox
       - run: tox -e tests_old_${{ matrix.part }}
diff --git a/.github/workflows/release-checks.yaml b/.github/workflows/release-checks.yaml
index 6c968596af..c034426c75 100644
--- a/.github/workflows/release-checks.yaml
+++ b/.github/workflows/release-checks.yaml
@@ -36,7 +36,7 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
         with:
-          python-version: "3.8"
+          python-version: "3.9"
       - run: pip install tox
       - run: tox -e types_old
@@ -74,7 +74,7 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
         with:
-          python-version: "3.8"
+          python-version: "3.9"
       - run: pip install tox
       - run: tox -e tests_old_${{ matrix.part }}
@@ -94,7 +94,7 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
         with:
-          python-version: "3.8"
+          python-version: "3.9"
       - run: pip install tox
       - run: tox -e tests_old -- --runslow only
diff --git a/README.md b/README.md
index 43655aaef3..466babbb1d 100644
--- a/README.md
+++ b/README.md
@@ -68,7 +68,7 @@
 query_point, observation, arg_min_idx = result.try_get_optimal_point()
 
 ## Installation
 
-Trieste supports Python 3.8+ and TensorFlow 2.5+, and uses [semantic versioning](https://semver.org/).
+Trieste supports Python 3.9+ and TensorFlow 2.5+, and uses [semantic versioning](https://semver.org/).
 
 ### For users
diff --git a/docs/index.rst b/docs/index.rst
index 0051345ca1..a530e681d4 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -44,7 +44,7 @@ To install Trieste, run
 
    $ pip install trieste
 
-The library supports Python 3.8 onwards, and uses `semantic versioning <https://semver.org/>`_.
+The library supports Python 3.9 onwards, and uses `semantic versioning <https://semver.org/>`_.
 
 Getting help
 ------------
diff --git a/setup.py b/setup.py
index 8f3ac40677..e164ff96af 100644
--- a/setup.py
+++ b/setup.py
@@ -32,11 +32,11 @@
         "trieste": ["py.typed", "VERSION"],
     },
     classifiers=[
-        "Programming Language :: Python :: 3.8",
+        "Programming Language :: Python :: 3.9",
         "License :: OSI Approved :: Apache Software License",
         "Operating System :: OS Independent",
     ],
-    python_requires="~=3.8",
+    python_requires="~=3.9",
     install_requires=[
         "absl-py",
         "dill<0.3.6",
diff --git a/tests/old/constraints.txt b/tests/old/constraints.txt
index 6071de7119..324feb4e7b 100644
--- a/tests/old/constraints.txt
+++ b/tests/old/constraints.txt
@@ -34,6 +34,8 @@ greenlet==3.0.3
 grpcio==1.64.1
 h5py==3.11.0
 idna==3.7
+importlib_metadata==7.2.1
+importlib_resources==6.4.0
 iniconfig==2.0.0
 keras==2.8.0
 Keras-Preprocessing==1.1.2
@@ -48,11 +50,11 @@ mdurl==0.1.2
 ml-dtypes==0.3.2
 multipledispatch==1.0.0
 namex==0.0.8
-numpy==1.21.6
+numpy==1.26.4
 oauthlib==3.2.2
 opt-einsum==3.3.0
 optree==0.11.0
-osqp==0.6.7
+osqp==0.6.7.post0
 packaging==24.1
 pillow==10.3.0
 pluggy==1.5.0
@@ -65,7 +67,7 @@ pyparsing==3.1.2
 pytest==8.2.2
 python-dateutil==2.9.0.post0
 PyYAML==6.0.1
-qdldl==0.1.7.post3
+qdldl==0.1.7.post4
 requests==2.32.3
 requests-oauthlib==2.0.0
 rich==13.7.1
@@ -88,3 +90,4 @@ typing_extensions==4.12.2
 urllib3==2.2.2
 Werkzeug==3.0.3
 wrapt==1.16.0
+zipp==3.19.2
diff --git a/tests/old/requirements.txt b/tests/old/requirements.txt
index 3c86693664..11bdf0e5f9 100644
--- a/tests/old/requirements.txt
+++ b/tests/old/requirements.txt
@@ -19,6 +19,3 @@ PyYAML
 # pin to older version of TF (for now)
 tensorflow ~= 2.8.0
 tensorflow-probability ~= 0.13.0
-
-# later numpy doesn't support Python 3.7
-numpy < 1.22.0
diff --git a/tox.ini b/tox.ini
index f0c3f9a065..c2914ab6fc 100644
--- a/tox.ini
+++ b/tox.ini
@@ -82,13 +82,13 @@ commands =
     quickdocs: pip install -r docs/requirements.txt -c docs/constraints.txt
     quickdocs: bash -c "cd docs; if (python notebooks/quickrun/quickrun.py && make html); then python notebooks/quickrun/quickrun.py --revert; else python notebooks/quickrun/quickrun.py --revert; exit 1; fi"
 
-# additional tests using python 3.8 and older versions of tensorflow
+# additional tests using python 3.9 and older versions of tensorflow
 [testenv:{types_old,tests_old,tests_old_1,tests_old_2,tests_old_3,tests_old_4}]
-basepython = python3.8
+basepython = python3.9
 description =
-    types_old: Check types [Python 3.8]
-    tests_old: Run tests [Python 3.8, no qhsri]
+    types_old: Check types [Python 3.9]
+    tests_old: Run tests [Python 3.9, no qhsri]
     tests_old_1: Run old tests part 1
     tests_old_2: Run old tests part 2
     tests_old_3: Run old tests part 3

From a37ffc78a0af73128356539deb6f31f0f6a7b6ce Mon Sep 17 00:00:00 2001
From: Uri Granta
Date: Wed, 4 Sep 2024 11:13:24 +0100
Subject: [PATCH 11/20] Fix recently added tests

---
 tests/unit/models/test_interfaces.py | 3 ++-
 trieste/space.py                     | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/tests/unit/models/test_interfaces.py b/tests/unit/models/test_interfaces.py
index 8cab36db42..4311fb5a54 100644
--- a/tests/unit/models/test_interfaces.py
+++ b/tests/unit/models/test_interfaces.py
@@ -23,6 +23,7 @@
 import pytest
 import tensorflow as tf
 import tensorflow_probability as tfp
+from gpflow.keras import tf_keras
 
 from tests.util.misc import assert_datasets_allclose, quadratic, random_seed
 from tests.util.models.gpflow.models import (
@@ -304,7 +305,7 @@ def test_encoded_supports_predict_y() -> None:
 
 
 def test_encoded_probabilistic_model_keras_embedding() -> None:
-    encoder = tf.keras.layers.Embedding(3, 2)
+    encoder = tf_keras.layers.Embedding(3, 2)
     model = _EncodedModel(encoder=encoder)
     query_points = tf.random.uniform([3, 5], minval=0, maxval=3, dtype=tf.int32)
     mean, var = model.predict(query_points)
diff --git a/trieste/space.py b/trieste/space.py
index 3dffb987c4..213498a852 100644
--- a/trieste/space.py
+++ b/trieste/space.py
@@ -25,6 +25,7 @@
 import tensorflow as tf
 import tensorflow_probability as tfp
 from check_shapes import check_shapes
+from gpflow.keras import tf_keras
 from typing_extensions import Protocol, runtime_checkable
 
 from .types import TensorType
@@ -643,7 +644,7 @@ def encoder(x: TensorType) -> TensorType:
             )
             columns = tf.split(flat_x, flat_x.shape[-1], axis=1)
             encoders = [
-                tf.keras.layers.CategoryEncoding(num_tokens=len(ts), output_mode="one_hot")
+                tf_keras.layers.CategoryEncoding(num_tokens=len(ts), output_mode="one_hot")
                 for ts in self.tags
             ]
             encoded = tf.concat(
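The layer swapped over in patch 11's `trieste/space.py` change, `CategoryEncoding`, produces the per-tag one-hot columns that the encoder then concatenates. A standalone sketch of what it does (assumes gpflow is installed, since — as in the patch — `gpflow.keras` is used to reach Keras 2 as `tf_keras`):

```python
import tensorflow as tf
from gpflow.keras import tf_keras  # re-exports Keras 2 on all supported TF versions

encoder = tf_keras.layers.CategoryEncoding(num_tokens=3, output_mode="one_hot")
print(encoder(tf.constant([0, 2, 1])))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]
```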
-c "cd docs; if (python notebooks/quickrun/quickrun.py && make html); then python notebooks/quickrun/quickrun.py --revert; else python notebooks/quickrun/quickrun.py --revert; exit 1; fi" -# additional tests using python 3.8 and older versions of tensorflow +# additional tests using python 3.9 and older versions of tensorflow [testenv:{types_old,tests_old,tests_old_1,tests_old_2,tests_old_3,tests_old_4}] -basepython = python3.8 +basepython = python3.9 description = - types_old: Check types [Python 3.8] - tests_old: Run tests [Python 3.8, no qhsri] + types_old: Check types [Python 3.9] + tests_old: Run tests [Python 3.9, no qhsri] tests_old_1: Run old tests part 1 tests_old_2: Run old tests part 2 tests_old_3: Run old tests part 3 From a37ffc78a0af73128356539deb6f31f0f6a7b6ce Mon Sep 17 00:00:00 2001 From: Uri Granta Date: Wed, 4 Sep 2024 11:13:24 +0100 Subject: [PATCH 11/20] Fix recently added tests --- tests/unit/models/test_interfaces.py | 3 ++- trieste/space.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/unit/models/test_interfaces.py b/tests/unit/models/test_interfaces.py index 8cab36db42..4311fb5a54 100644 --- a/tests/unit/models/test_interfaces.py +++ b/tests/unit/models/test_interfaces.py @@ -23,6 +23,7 @@ import pytest import tensorflow as tf import tensorflow_probability as tfp +from gpflow.keras import tf_keras from tests.util.misc import assert_datasets_allclose, quadratic, random_seed from tests.util.models.gpflow.models import ( @@ -304,7 +305,7 @@ def test_encoded_supports_predict_y() -> None: def test_encoded_probabilistic_model_keras_embedding() -> None: - encoder = tf.keras.layers.Embedding(3, 2) + encoder = tf_keras.layers.Embedding(3, 2) model = _EncodedModel(encoder=encoder) query_points = tf.random.uniform([3, 5], minval=0, maxval=3, dtype=tf.int32) mean, var = model.predict(query_points) diff --git a/trieste/space.py b/trieste/space.py index 3dffb987c4..213498a852 100644 --- a/trieste/space.py +++ b/trieste/space.py @@ -25,6 +25,7 @@ import tensorflow as tf import tensorflow_probability as tfp from check_shapes import check_shapes +from gpflow.keras import tf_keras from typing_extensions import Protocol, runtime_checkable from .types import TensorType @@ -643,7 +644,7 @@ def encoder(x: TensorType) -> TensorType: ) columns = tf.split(flat_x, flat_x.shape[-1], axis=1) encoders = [ - tf.keras.layers.CategoryEncoding(num_tokens=len(ts), output_mode="one_hot") + tf_keras.layers.CategoryEncoding(num_tokens=len(ts), output_mode="one_hot") for ts in self.tags ] encoded = tf.concat( From 9045c4c879ae43106a65e55c8441dfb7ee900b5a Mon Sep 17 00:00:00 2001 From: Uri Granta Date: Wed, 4 Sep 2024 11:22:27 +0100 Subject: [PATCH 12/20] Fix DE serialisation --- trieste/models/keras/architectures.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/trieste/models/keras/architectures.py b/trieste/models/keras/architectures.py index 89465fefcf..1cee8a8b9e 100644 --- a/trieste/models/keras/architectures.py +++ b/trieste/models/keras/architectures.py @@ -29,8 +29,8 @@ from gpflow.keras import tf_keras try: - from keras.src.saving.serialization_lib import SafeModeScope -except ImportError: # pragma: no cover (tested but not by coverage) + SafeModeScope = tf_keras.src.saving.serialization_lib.SafeModeScope +except AttributeError: # pragma: no cover (tested but not by coverage) SafeModeScope = contextlib.nullcontext from tensorflow_probability.python.layers.distribution_layer import DistributionLambda, _serialize @@ -147,7 +147,7 @@ def 
From 1c3bc2f54175f21087d9ff8cdd8a51341cebb526 Mon Sep 17 00:00:00 2001
From: Uri Granta
Date: Wed, 4 Sep 2024 12:32:31 +0100
Subject: [PATCH 13/20] Another fix

---
 trieste/models/keras/utils.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/trieste/models/keras/utils.py b/trieste/models/keras/utils.py
index 5fb0bbd972..7fdd53d71f 100644
--- a/trieste/models/keras/utils.py
+++ b/trieste/models/keras/utils.py
@@ -18,6 +18,7 @@
 
 import tensorflow as tf
 import tensorflow_probability as tfp
+from gpflow.keras import tf_keras
 
 from ...data import Dataset
 from ...types import TensorType
@@ -122,6 +123,7 @@ def sample_model_index(
     return indices
 
 
+@tf_keras.saving.register_keras_serializable()
 def negative_log_likelihood(
     y_true: TensorType, y_pred: tfp.distributions.Distribution
 ) -> TensorType:

From 584c762f656dbf6f4947cf63e3a9a7c74e61add6 Mon Sep 17 00:00:00 2001
From: Uri Granta
Date: Wed, 4 Sep 2024 12:45:33 +0100
Subject: [PATCH 14/20] pin numpy in old tests

---
 tests/old/constraints.txt  | 61 +++++++++++++++++++-------------------
 tests/old/requirements.txt |  3 ++
 2 files changed, 33 insertions(+), 31 deletions(-)

diff --git a/tests/old/constraints.txt b/tests/old/constraints.txt
index 324feb4e7b..4a209746d2 100644
--- a/tests/old/constraints.txt
+++ b/tests/old/constraints.txt
@@ -2,16 +2,16 @@ about-time==4.2.1
 absl-py==2.1.0
 alive-progress==3.1.5
 astunparse==1.6.3
-autograd==1.6.2
-cachetools==5.3.3
-certifi==2024.6.2
+autograd==1.7.0
+cachetools==5.5.0
+certifi==2024.8.30
 charset-normalizer==3.3.2
 check-shapes==1.1.1
 clarabel==0.9.0
 cloudpickle==3.0.0
 cma==3.2.2
-contourpy==1.2.1
-cvxpy==1.5.2
+contourpy==1.3.0
+cvxpy==1.5.3
 cycler==0.12.1
 decorator==5.1.1
 Deprecated==1.2.14
 dill==0.3.5.1
 dm-tree==0.1.8
 dropstackframe==0.1.0
 ecos==2.0.14
-exceptiongroup==1.2.1
+exceptiongroup==1.2.2
 flatbuffers==24.3.25
-fonttools==4.53.0
-future==1.0.0
-gast==0.5.4
-google-auth==2.30.0
+fonttools==4.53.1
+gast==0.6.0
+google-auth==2.34.0
 google-auth-oauthlib==0.4.6
 google-pasta==0.2.0
 gpflow==2.9.2
 gpflux==0.4.4
 grapheme==0.6.0
 greenlet==3.0.3
-grpcio==1.64.1
+grpcio==1.66.1
 h5py==3.11.0
-idna==3.7
-importlib_metadata==7.2.1
-importlib_resources==6.4.0
+idna==3.8
+importlib_metadata==8.4.0
+importlib_resources==6.4.4
 iniconfig==2.0.0
 keras==2.8.0
 Keras-Preprocessing==1.1.2
-kiwisolver==1.4.5
-lark==1.1.9
+kiwisolver==1.4.7
+lark==1.2.2
 libclang==18.1.1
-Markdown==3.6
+Markdown==3.7
 markdown-it-py==3.0.0
 MarkupSafe==2.1.5
-matplotlib==3.9.0
+matplotlib==3.9.2
 mdurl==0.1.2
 ml-dtypes==0.3.2
 multipledispatch==1.0.0
 namex==0.0.8
-numpy==1.26.4
+numpy==1.21.6
 oauthlib==3.2.2
 opt-einsum==3.3.0
-optree==0.11.0
-osqp==0.6.7.post0
+optree==0.12.1
+osqp==0.6.7.post1
 packaging==24.1
-pillow==10.3.0
+pillow==10.4.0
 pluggy==1.5.0
 protobuf==3.19.6
 pyasn1==0.6.0
 pyasn1_modules==0.4.0
 Pygments==2.18.0
-pymoo==0.6.1.1
-pyparsing==3.1.2
-pytest==8.2.2
+pymoo==0.6.1.3
+pyparsing==3.1.4
+pytest==8.3.2
 python-dateutil==2.9.0.post0
-PyYAML==6.0.1
+PyYAML==6.0.2
 qdldl==0.1.7.post4
 requests==2.32.3
 requests-oauthlib==2.0.0
-rich==13.7.1
+rich==13.8.0
 rsa==4.9
 scipy==1.13.1
-scs==3.2.4.post3
+scs==3.2.7
 six==1.16.0
 tabulate==0.9.0
 tensorboard==2.8.0
 tensorboard-data-server==0.6.1
 tensorboard-plugin-wit==1.8.1
 tensorflow==2.8.4
 tensorflow-estimator==2.8.0
-tensorflow-io-gcs-filesystem==0.37.0
+tensorflow-io-gcs-filesystem==0.37.1
 tensorflow-probability==0.13.0
 termcolor==2.4.0
 tf_keras==2.16.0
 tomli==2.0.1
 typing_extensions==4.12.2
 urllib3==2.2.2
-Werkzeug==3.0.3
+Werkzeug==3.0.4
 wrapt==1.16.0
-zipp==3.19.2
+zipp==3.20.1
diff --git a/tests/old/requirements.txt b/tests/old/requirements.txt
index 11bdf0e5f9..f0635cd4c5 100644
--- a/tests/old/requirements.txt
+++ b/tests/old/requirements.txt
@@ -19,3 +19,6 @@ PyYAML
 # pin to older version of TF (for now)
 tensorflow ~= 2.8.0
 tensorflow-probability ~= 0.13.0
+
+# tensorflow-probability depends on np.bool which is deprecated
+numpy < 1.22.0
\ No newline at end of file

From c05318c096cc50092f153c60268ba485c9f0b4b7 Mon Sep 17 00:00:00 2001
From: Uri Granta
Date: Wed, 4 Sep 2024 12:52:15 +0100
Subject: [PATCH 15/20] Try pinning scipy too

---
 tests/old/constraints.txt  | 2 +-
 tests/old/requirements.txt | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/tests/old/constraints.txt b/tests/old/constraints.txt
index 4a209746d2..510a02070a 100644
--- a/tests/old/constraints.txt
+++ b/tests/old/constraints.txt
@@ -71,7 +71,7 @@ requests==2.32.3
 requests-oauthlib==2.0.0
 rich==13.8.0
 rsa==4.9
-scipy==1.13.1
+scipy==1.7.3
 scs==3.2.7
 six==1.16.0
 tabulate==0.9.0
diff --git a/tests/old/requirements.txt b/tests/old/requirements.txt
index f0635cd4c5..ea3a934cd6 100644
--- a/tests/old/requirements.txt
+++ b/tests/old/requirements.txt
@@ -21,4 +21,5 @@ tensorflow ~= 2.8.0
 tensorflow-probability ~= 0.13.0
 
 # tensorflow-probability depends on np.bool which is deprecated
-numpy < 1.22.0
\ No newline at end of file
+numpy < 1.22.0
+scipy < 1.8

From 6884835813f9a688fb365d6378aad6511cb82874 Mon Sep 17 00:00:00 2001
From: Uri Granta
Date: Wed, 4 Sep 2024 13:09:45 +0100
Subject: [PATCH 16/20] Register_keras_serializable not available in old TF

---
 trieste/models/keras/utils.py | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/trieste/models/keras/utils.py b/trieste/models/keras/utils.py
index 7fdd53d71f..741edfdfd6 100644
--- a/trieste/models/keras/utils.py
+++ b/trieste/models/keras/utils.py
@@ -14,7 +14,7 @@
 
 from __future__ import annotations
 
-from typing import Optional
+from typing import Callable, Optional
 
 import tensorflow as tf
 import tensorflow_probability as tfp
@@ -23,6 +23,14 @@ from gpflow.keras import tf_keras
 from ...data import Dataset
 from ...types import TensorType
 
+try:
+    register_keras_serializable = tf_keras.saving.register_keras_serializable()
+except AttributeError:  # pragma: no cover (tested but not by coverage)
+
+    # not required in earlier versions of TF
+    def register_keras_serializable(func: Callable[..., object]) -> Callable[..., object]:
+        return func
+
 
 def get_tensor_spec_from_data(dataset: Dataset) -> tuple[tf.TensorSpec, tf.TensorSpec]:
     r"""
@@ -122,6 +131,7 @@ def sample_model_index(
     return indices
 
 
-@tf_keras.saving.register_keras_serializable()
+@register_keras_serializable
 def negative_log_likelihood(
     y_true: TensorType, y_pred: tfp.distributions.Distribution
 ) -> TensorType:
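The shim in patch 16 has one subtlety: on newer TF, `tf_keras.saving.register_keras_serializable()` is a decorator *factory* that is called once, so the old-TF fallback must likewise be usable as a bare `@register_keras_serializable`. A reduced sketch with a hypothetical stand-in object, so it runs without any particular TF version (the `negative_log_likelihood` body here is also only illustrative; the real one lives in `trieste/models/keras/utils.py`):

```python
from typing import Callable

class _Saving:  # hypothetical stand-in for tf_keras.saving on an *old* TF
    pass

try:
    # on newer TF, calling the factory once yields a real decorator
    register_keras_serializable = _Saving.register_keras_serializable()
except AttributeError:
    # on older TF there is nothing to register with: fall back to identity
    def register_keras_serializable(func: Callable[..., object]) -> Callable[..., object]:
        return func

@register_keras_serializable  # used bare, with no trailing parentheses
def negative_log_likelihood(y_true, y_pred):
    return -y_pred.log_prob(y_true)

print(negative_log_likelihood.__name__)  # works on both branches
```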
From 9893de897caafdc22793ac50aa7a6d146704b6a8 Mon Sep 17 00:00:00 2001
From: Uri Granta
Date: Fri, 6 Sep 2024 15:28:35 +0100
Subject: [PATCH 17/20] Fix history serialisation

---
 trieste/models/gpflux/models.py | 5 ++---
 trieste/models/keras/models.py  | 5 ++---
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/trieste/models/gpflux/models.py b/trieste/models/gpflux/models.py
index 723bdc46ff..2feb6d54aa 100644
--- a/trieste/models/gpflux/models.py
+++ b/trieste/models/gpflux/models.py
@@ -18,7 +18,6 @@
 
 import dill
 import gpflow
-import keras.callbacks
 import tensorflow as tf
 from check_shapes import inherit_check_shapes
 from gpflow.inducing_variables import InducingPoints
@@ -232,7 +231,7 @@ def __getstate__(self) -> dict[str, Any]:
             self._model_keras.history.model = history_model
 
         # don't try to serialize any other copies of the history callback
-        if isinstance(state.get("_last_optimization_result"), keras.callbacks.History):
+        if isinstance(state.get("_last_optimization_result"), tf_keras.callbacks.History):
             state["_last_optimization_result"] = ...
 
         return state
@@ -367,7 +366,7 @@ def update(self, dataset: Dataset) -> None:
             inputs = layer(inputs)
 
-    def optimize(self, dataset: Dataset) -> keras.callbacks.History:
+    def optimize(self, dataset: Dataset) -> tf_keras.callbacks.History:
         """
         Optimize the model with the specified `dataset`.
 
         :param dataset: The data with which to optimize the `model`.
diff --git a/trieste/models/keras/models.py b/trieste/models/keras/models.py
index 80fc960b49..f72e440965 100644
--- a/trieste/models/keras/models.py
+++ b/trieste/models/keras/models.py
@@ -18,7 +18,6 @@
 from typing import Any, Dict, Mapping, Optional
 
 import dill
-import keras.callbacks
 import tensorflow as tf
 import tensorflow_probability as tfp
 import tensorflow_probability.python.distributions as tfd
@@ -378,7 +377,7 @@ def update_encoded(self, dataset: Dataset) -> None:
         """
         return
 
-    def optimize_encoded(self, dataset: Dataset) -> keras.callbacks.History:
+    def optimize_encoded(self, dataset: Dataset) -> tf_keras.callbacks.History:
         """
         Optimize the underlying Keras ensemble model with the specified ``dataset``.
 
@@ -523,7 +522,7 @@ def __getstate__(self) -> dict[str, Any]:
                 callback._writers = writers
 
         # don't serialize any history optimization result
-        if isinstance(state.get("_last_optimization_result"), keras.callbacks.History):
+        if isinstance(state.get("_last_optimization_result"), tf_keras.callbacks.History):
             state["_last_optimization_result"] = ...
 
         return state
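Patch 17's `__getstate__` edits follow a standard pickling pattern: any value that holds a reference to the live Keras model — such as a `History` callback — is replaced with `...` before serialisation, since the model itself cannot be pickled. A minimal self-contained sketch of the pattern (hypothetical class, not Trieste code):

```python
import pickle
from typing import Any

class TrainerState:
    def __init__(self) -> None:
        self.name = "deep_ensemble"
        self.last_result = object()  # stands in for a History holding a live model

    def __getstate__(self) -> dict[str, Any]:
        state = self.__dict__.copy()
        state["last_result"] = ...  # drop the unpicklable value but keep the key
        return state

restored = pickle.loads(pickle.dumps(TrainerState()))
print(restored.name, restored.last_result)  # deep_ensemble Ellipsis
```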
From 1a827192a7286777f7b84ad2550432a966c0a7 Mon Sep 17 00:00:00 2001
From: Uri Granta
Date: Fri, 6 Sep 2024 15:28:59 +0100
Subject: [PATCH 18/20] Add upper bound to TF in setup

---
 setup.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/setup.py b/setup.py
index e164ff96af..47aab3fca0 100644
--- a/setup.py
+++ b/setup.py
@@ -43,9 +43,9 @@
         "gpflow>=2.9.2",
         "gpflux>=0.4.4",
         "numpy",
-        "tensorflow>=2.5; platform_system!='Darwin' or platform_machine!='arm64'",
-        "tensorflow-macos>=2.5; platform_system=='Darwin' and platform_machine=='arm64'",
-        "tensorflow-probability>=0.13",
+        "tensorflow>=2.5,<2.17; platform_system!='Darwin' or platform_machine!='arm64'",
+        "tensorflow-macos>=2.5,<2.17; platform_system=='Darwin' and platform_machine=='arm64'",
+        "tensorflow-probability>=0.13,<0.25",
         "greenlet>=1.1.0",
     ],
     extras_require={

From acfcf9228791b37f4f2bd913d1b97a41ce97bc0f Mon Sep 17 00:00:00 2001
From: Uri Granta
Date: Fri, 6 Sep 2024 16:57:45 +0100
Subject: [PATCH 19/20] Mention keras import in README

---
 README.md                  | 7 +++++++
 tests/old/requirements.txt | 2 +-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 466babbb1d..7d5a763460 100644
--- a/README.md
+++ b/README.md
@@ -111,6 +111,13 @@ Alternatively, you can copy and paste the tutorials into fresh notebooks and avo
 $ pip install trieste[plotting]
 ```
 
+### Importing Keras
+
+Like [tensorflow-probability](https://www.tensorflow.org/probability), Trieste currently uses Keras 2. With TensorFlow 2.16 onwards (which defaults to Keras 3), Keras 2 needs to be imported from `tf_keras` rather than `tf.keras`. Alternatively, as a shortcut that works with all versions of TensorFlow, you can write:
+```python
+from gpflow.keras import tf_keras
+```
+
 ## The Trieste Community
 
 ### Getting help
diff --git a/tests/old/requirements.txt b/tests/old/requirements.txt
index ea3a934cd6..41e44ce4d7 100644
--- a/tests/old/requirements.txt
+++ b/tests/old/requirements.txt
@@ -20,6 +20,6 @@ PyYAML
 # pin to older version of TF (for now)
 tensorflow ~= 2.8.0
 tensorflow-probability ~= 0.13.0
 
-# tensorflow-probability depends on np.bool which is deprecated
+# tensorflow-probability 0.13 depends on np.bool which is deprecated
 numpy < 1.22.0
 scipy < 1.8

From 59b4c478be7f9f2570fde50d9dca633f03948eb3 Mon Sep 17 00:00:00 2001
From: Uri Granta
Date: Mon, 9 Sep 2024 10:54:03 +0100
Subject: [PATCH 20/20] Check whether register_keras_serializable is actually necessary

---
 trieste/models/keras/utils.py | 12 +----------
 1 file changed, 1 insertion(+), 11 deletions(-)

diff --git a/trieste/models/keras/utils.py b/trieste/models/keras/utils.py
index 741edfdfd6..5fb0bbd972 100644
--- a/trieste/models/keras/utils.py
+++ b/trieste/models/keras/utils.py
@@ -14,23 +14,14 @@
 
 from __future__ import annotations
 
-from typing import Callable, Optional
+from typing import Optional
 
 import tensorflow as tf
 import tensorflow_probability as tfp
-from gpflow.keras import tf_keras
 
 from ...data import Dataset
 from ...types import TensorType
 
-try:
-    register_keras_serializable = tf_keras.saving.register_keras_serializable()
-except AttributeError:  # pragma: no cover (tested but not by coverage)
-
-    # not required in earlier versions of TF
-    def register_keras_serializable(func: Callable[..., object]) -> Callable[..., object]:
-        return func
-
 
 def get_tensor_spec_from_data(dataset: Dataset) -> tuple[tf.TensorSpec, tf.TensorSpec]:
     r"""
@@ -131,7 +122,6 @@ def sample_model_index(
     return indices
 
 
-@register_keras_serializable
 def negative_log_likelihood(
     y_true: TensorType, y_pred: tfp.distributions.Distribution
 ) -> TensorType:
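Finally, for reference, a usage sketch of the loss whose registration patches 13, 16 and 20 went back and forth on. This assumes `tensorflow` and `tensorflow_probability` are installed, and the one-line body is an assumption for illustration — the canonical implementation is in `trieste/models/keras/utils.py`:

```python
import tensorflow as tf
import tensorflow_probability as tfp

def negative_log_likelihood(y_true, y_pred):
    # y_pred is a distribution, e.g. the output of a DistributionLambda layer
    return -y_pred.log_prob(y_true)

dist = tfp.distributions.Normal(loc=0.0, scale=1.0)
loss = negative_log_likelihood(tf.constant([0.5]), dist)
print(float(tf.reduce_mean(loss)))  # ~1.044: NLL of 0.5 under a standard normal
```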