diff --git a/.gitignore b/.gitignore
index b26562d..2dafb2a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,22 @@
+
+# Created by https://www.toptal.com/developers/gitignore/api/linux,visualstudiocode,python
+# Edit at https://www.toptal.com/developers/gitignore?templates=linux,visualstudiocode,python
+
+### Linux ###
+*~
+
+# temporary files which can be created if a process still has a handle open of a deleted file
+.fuse_hidden*
+
+# KDE directory preferences
+.directory
+
+# Linux trash folder which might appear on any partition or disk
+.Trash-*
+
+# .nfs files are created when an open file is removed but is still being accessed
+.nfs*
+
 ### Python ###
 # Byte-compiled / optimized / DLL files
 __pycache__/
@@ -185,4 +204,4 @@ cython_debug/
 # Ignore code-workspaces
 *.code-workspace

-# End of https://www.toptal.com/developers/gitignore/api/python,visualstudiocode
\ No newline at end of file
+# End of https://www.toptal.com/developers/gitignore/api/linux,visualstudiocode,python
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..e9e6a80
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,11 @@
+{
+    "python.testing.unittestArgs": [
+        "-v",
+        "-s",
+        "./tests",
+        "-p",
+        "test_*.py"
+    ],
+    "python.testing.pytestEnabled": false,
+    "python.testing.unittestEnabled": true
+}
\ No newline at end of file
diff --git a/README.md b/README.md
index 0445dec..34935de 100644
--- a/README.md
+++ b/README.md
@@ -4,151 +4,127 @@

![Swin Transformer architecture](https://github.com/microsoft/Swin-Transformer/blob/3b0685bf2b99b4cf5770e47260c0f0118e6ff1bb/figures/teaser.png)

-This is a TensorFlow 2.0 implementation of the [Swin Transformer architecture](https://arxiv.org/abs/2103.14030).
+This is a Keras/TensorFlow 2.0 implementation of the [Swin Transformer architecture](https://arxiv.org/abs/2103.14030) inspired by the official PyTorch [code](https://github.com/microsoft/Swin-Transformer). It is built using the Keras API following best practices, such as allowing complete serialization and deserialization of custom layers and deferring weight creation until the first call with real inputs.

-This implementation is inspired by the [official version](https://github.com/microsoft/Swin-Transformer) offered by authors of the paper, while simultaneously improving in some areas such as shape and type checks.
-
## Installation

Clone the repository:

```bash
git clone git@github.com:MidnessX/swin.git
```

-Enter into the directory:
+Move into it:

```bash
cd swin
```

Install the package via:

```bash
-pip install -e .
+pip install swin-transformer
```

## Usage

Class ``Swin`` in ``swin.model`` is a subclass of ``tf.keras.Model``, so you can instantiate Swin Transformers and train them through well-known interface methods, such as ``compile()``, ``fit()``, ``save()``.

-The only remark is the first argument to the ``Swin`` class constructor, which is expected to be a ``tf.Tensor`` object or equivalent, such as a symbolic tensor produced by ``tf.keras.Input``.
-This tensor is only used to determine the shape of future inputs and can be an example coming from your dataset or any random tensor sharing its shape.
-
For convenience, ``swin.model`` also includes classes for variants of the Swin architecture described in the article (``SwinT``, ``SwinS``, ``SwinB``, ``SwinL``) which initialize a ``Swin`` object with the variant's parameters.
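+
+As a quick sketch of this convenience API (assuming the variant constructors, like ``Swin`` itself, only need the number of output classes):
+
+```python
+from swin.model import SwinT
+
+# Hypothetical class count; the Swin-T variant's depths, heads and
+# embedding size are filled in by the SwinT constructor.
+model = SwinT(num_classes=10)
+```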
## Example

```python
-import tensorflow as tf
-
-from swin.model import SwinT
-
-# Load the dataset as a list of mini batches
-train_x = ...
-train_y = ...
-num_classes = ...
+import tensorflow.keras as keras
+from swin import Swin

-# Take a mini batch from the dataset to build the model
-mini_batch = train_x[0]
+# Dataset loading, omitted for brevity
+x = [...]
+y = [...]
+num_classes = [...]

-model = SwinT(mini_batch, num_classes)
+model = Swin(num_classes)

-# Build the model by calling it for the first time
-model(mini_batch)
-
-# Compile the model
model.compile(
-    loss=tf.keras.losses.SGD(learning_rate=1e-3, momentum=0.9),
-    optimizer=tf.keras.optimizers.CategoricalCrossentropy(),
-    metrics=[tf.keras.metrics.CategoricalAccuracy()]
+    optimizer=keras.optimizers.AdamW(),
+    loss=keras.losses.CategoricalCrossentropy(),
+    metrics=[keras.metrics.CategoricalAccuracy()]
)

-# Train the model
-history = model.fit(train_x, train_y, epochs=300)
+model.fit(
+    x,
+    y,
+    epochs=1000,
+)

-# Save the trained model
model.save("path/to/model/directory")
```

## Notes

-- The input type accepted by the model is ``tf.float32``. Any pre-processing of data should include a conversion step of images from ``tf.uint8`` to ``tf.float32`` if necessary.
-
-- Swin architectures have many parameters, so training them is not an easy task. Expect a lot of trial & error before honing in on correct hyperparameters.
-
-- ``SwinModule`` layers place the dimensionality reduction layer (``SwinPatchMerging``) after transformer layers (``SwinTransformer``), rather than before as found in the paper. This choice is to maintain consistency with the original network implementation.
-
-## Testing
-
-Test modules can be found under the ``tests`` folder of this repository.
-They can be executed to test the expected functionality of custom layers for the Swin architecture, as well as basic functionalities of the whole model.
-
-Admittedly these tests could be expanded and further improved to cover more cases, but they should be enough to verify general functionality.
-
-## Assumptions and simplifications
-
-While implementing the Swin Transformer architecture a number of assumptions and simplifications have been made:
-
-1. Input images must have 3 channels.
+This network has been built to be consistent with its [official PyTorch implementation](https://github.com/microsoft/Swin-Transformer).
+This translates into the following statements:

-2. The size of windows in (Shifted) Windows Multi-head Attention is fixed to 7[^1].
+- The ratio of hidden to output neurons in MLP blocks is set to 4.
+- Projection of input data to obtain `Q`, `K`, and `V` includes a bias term in all transformer blocks.
+- The attention logits `Q K^T` are scaled by `1/sqrt(d)`, where `d` is the size of each attention head.
+- No _Dropout_ is applied to attention heads.
+- [_Stochastic Depth_](https://arxiv.org/pdf/1603.09382.pdf) is applied during training to randomly skip the residual contribution of a block after the attention computation, with probability set to 10%.
+- No absolute position information is added to embeddings.
+- _Layer Normalization_ is applied to embeddings.
+- The extraction of patches from images and the generation of embeddings both happen in the `SwinPatchEmbeddings` layer.
+- Patch merging happens at the end of each stage, rather than at the beginning.
+  This simplifies the definition of layers and does not change the overall architecture.

-3. The ratio of hidden to output neurons in ``SwinMlp`` layers is fixed to 4[^1].
+Additionally, the following decisions have been made to simplify development:

-
-4. A learnable bias is added to ``queries``, ``keys`` and ``values`` when computing (Shifted) Window Multi-head Attention[^2].
-
-5. ``queries`` and ``keys`` are scaled by a factor of ``head_dimension**-0.5``[^1].
-
-6. No dropout is applied to attention heads[^2].
-
-7. The probability of the Stochastic Depth computation-skipping technique during training is fixed to 0.1[^2].
-
-8. No absolute position information is included in embeddings[^3].
-
-9. ``LayerNormalization`` is applied after building patch embeddings[^2].
-
-[^1]: To stay consistent with the content of the paper.
-
-[^2]: In the original implementation this happens when using default arguments.
-
-[^3]: Researchers note in the paper that adding absolute position information to embedding decreases network capabilities.
+- The network only accepts square `tf.float32` images with 3 channels as inputs (i.e. height and width must be identical).
+- No padding is applied to embeddings during the SW-MSA calculation, as their size is assumed to be a multiple of the window size.

## Choosing parameters

-### Dependencies
-
-If using the base class (``Swin``), it is necessary to provide a series of parameters to instantiate the model.
-The choice of these values is important and a series of dependencies exist between them.
+When using any of the subclasses (``SwinT``, ``SwinS``, ``SwinB``, ``SwinL``), the architecture is fixed to their respective variants found in the paper.

-The size of windows (``window_size``) used during (Shifted) Windows Multi-head Self Attention is the starting point and, as stated in the section about [assumptions](https://github.com/MidnessX/swin#assumptions-and-simplifications), it is fixed to ``7`` (as in the original paper).
+When using the `Swin` class directly, however, you can customize the resulting architecture by specifying all the network's parameters.
+This section provides an overview of the dependencies existing between these parameters.

-The resolution of inputs to network stages, expressed as the number of patches along each axis, must be a multiple of ``window_size`` and gets halved by every stage through ``SwinPatchMerging`` layers.
-The suggestion is to choose a resolution for the final stage and multiply it by ``2`` for every stage in the desired model, obtaining the input resolution of the first stage (``resolution_stage_1``).
+- Each stage has an input with shape `(batch_size, num_patches, num_patches, embed_dim)`.
+  `num_patches` must be a multiple of `window_size`.
+- Each stage halves the `num_patches` dimension by merging four adjacent patches together.
+  It can be easier to choose a desired number of patches in the last stage and multiply it by 2 for every stage in the network to obtain the initial `num_patches` value.
+- By multiplying `num_patches` by `patch_size` you can find out the size in pixels of input images.
+- `embed_dim` must be a multiple of `num_heads` for every stage.
+- The number of transformer blocks in each stage can be set freely, as they do not alter the shape of patches.

-Input images to the ``Swin`` model must be squares, with their height/width given by multiplying ``resolution_stage_1`` with the desired size of patches (``patch_size``).
+To better understand how to choose network parameters, consider the following example:

-The number of ``SwinTransformer`` layers in each stage (``depths``) is arbitrary.
+1. The depth is set to 3 stages.
+2. Windows are set to be 8 patches wide (i.e. `window_size = 8`).
+
+3. The last stage should have a `2 * window_size = 16` patch-wide input.
+   This means that the inputs to the second stage and the first stage will be 32x32 and 64x64 patches wide, respectively.
+4. We require each patch to cover a 6x6 pixel area, so the initial image will be `num_patches * 6 = 64 * 6 = 384` pixels wide.
+5. For the first stage, we choose 2 attention layers; 4 for the second, and 2 for the third.
+6. The number of attention heads is set to 4.
+   This implies that there will be 8 attention heads in the second stage and 16 attention heads in the third stage.
+7. Using the value found in the Swin paper, the `embed_dim / num_heads` ratio is set to 32, leading to an initial `embed_dim` of `32 * 4 = 128`.
-The number of transformer heads (``num_heads``) should instead double at each stage.
-Authors of the paper use a fixed ratio between embedding dimensions and the number of heads in each stage of the network, amounting to ``32``.
-This means that, chosen the number of transformer heads in the first stage, it should be multiplied by ``32`` to obtain ``embed_dim``.
+Summarizing, this is equal to:
-The following example should help clarify these concepts.
+- `image_size = 384`
+- `patch_size = 6`
+- `window_size = 8`
+- `embed_dim = 128`
+- `depths = [2, 4, 2]`
+- `num_heads = [4, 8, 16]`
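+
+Fed to the ``Swin`` constructor, the configuration above would look roughly like this (a sketch only; it assumes the constructor accepts these values as keyword arguments together with the number of output classes):
+
+```python
+from swin.model import Swin
+
+model = Swin(
+    num_classes=10,  # hypothetical class count
+    image_size=384,
+    patch_size=6,
+    window_size=8,
+    embed_dim=128,
+    depths=[2, 4, 2],
+    num_heads=[4, 8, 16],
+)
+```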
-### Parameter choice example
-
-Let's imagine we want a Swin Transformer having ``3`` stages.
-The last stage (``stage_3``) should receive inputs of ``14x14`` patches (``14 = window_size * 2``); this also means that ``stage_2`` receives inputs of ``28x28`` patches and ``stage_1`` of ``56x56``.

## Testing

-We want to convert our images into patches having size ``6x6``, so images should have size ``56 * 6 = 336``.
+Test modules can be found under the ``tests`` folder of this repository.
+They can be executed to verify the expected functionality of custom layers for the Swin architecture, as well as basic functionalities of the whole model.

-Our network will have ``2`` transformers in the first stage, ``4`` in the second and ``2`` in the third.
-We choose ``4`` heads for the first stage and thus the second one will have ``8`` heads while the third ``16``.
+You can run them with the following command:
+```bash
+python -m unittest discover -s ./tests
+```

-With these numbers we can derive the size of embeddings used in the first stage by multiplying ``32`` by ``4``, giving us ``128``.

## Extras

-Summarizing, we have:
+To better understand how SW-MSA works, a Jupyter notebook found in the `extras` folder can be used to visualize window partitioning, translation and mask construction.

-- ``image_size = 336``
-- ``patch_size = 6``
-- ``embed_dim = 128``
-- ``depths = [2, 4, 2]``
-- ``num_heads = [4, 8, 16]``
\ No newline at end of file
diff --git a/extras/cyclic_shift.ipynb b/extras/cyclic_shift.ipynb
new file mode 100644
index 0000000..79ed558
--- /dev/null
+++ b/extras/cyclic_shift.ipynb
@@ -0,0 +1,404 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Cyclic shifting and SW-MSA masking\n",
+    "\n",
+    "This notebook is meant to help understand what happens during cyclic shifting of patches in the computation of Shifted Windows Multi-head Self Attention."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "2023-07-05 16:36:18.110086: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
+      "2023-07-05 16:36:18.136710: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
+      "To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
+     ]
+    }
+   ],
+   "source": [
+    "import numpy as np\n",
+    "import matplotlib as mpl\n",
+    "import matplotlib.pyplot as plt\n",
+    "import matplotlib.lines as lines\n",
+    "from math import ceil, floor\n",
+    "from swin.modules import SwinTransformer\n",
+    "import tensorflow as tf\n",
+    "\n",
+    "%matplotlib inline"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We define a bunch of constants which determine the size of the input feature map to the SW-MSA."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "patch_size = 2 # in px\n",
+    "map_size = 8 # in px\n",
+    "n_patches = ceil(map_size / patch_size)\n",
+    "\n",
+    "res = int(map_size / patch_size) # in # of patches\n",
+    "window_size = 2 # in # of patches\n",
+    "shift_size = 1 # in # of patches\n",
+    "\n",
+    "windows_res = int(map_size / (window_size * patch_size)) # in windows"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We first plot the feature map with windows overlaid on top.\n",
+    "Red lines denote the windows without any shifting applied, while cyan lines denote windows after shifting.\n",
+    "\n",
+    "We then plot the same feature map after having applied cyclic shift.\n",
+    "We can see that patches are simultaneously moved to the top and to the left, with those overflowing ending at the bottom and to the right.\n",
+    "Again, red lines denote the windows into which the feature map will be split.\n",
+    "\n",
+    "We can clearly see that, after shifting, windows correspond to those painted in cyan in the first image.\n",
+    "It's also worth mentioning that some windows are made of patches which were not adjacent in the original feature map.\n",
+    "This is why we need a mask during SW-MSA.\n",
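+    "\n",
+    "(In the next cell, this shift is implemented as `np.roll(fm, shift=[-shift_size * patch_size, -shift_size * patch_size], axis=[0, 1])`, i.e. a roll of `shift_size` patches towards the top-left along both axes.)"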
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfoAAAGOCAYAAAB/psGeAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAq2UlEQVR4nO3dfXRU9b3v8c9kkkxCSIYHSUJKAtRaqeFBBMlFbAs1hZtLKfScUvVSjdCjbQ1CmlMLaQsBFQJdq6xY5QJaC3YpIraC1nOFYipwuIokwbigPfLQIqZWQD0yQxIyCTP7/kGJDCQke2bPTGbzfq01i9mbPd/fl5BfPtkz+8FhGIYhAABgSwmxbgAAAEQOQQ8AgI0R9AAA2BhBDwCAjRH0AADYGEEPAICNEfQAANgYQQ8AgI0lxroBAABiqaWlRa2trZbUSk5OVkpKiiW1rELQAwCuWi0tLUpN7y+da7akXnZ2to4dO9ajwp6gBwBctVpbW6VzzXLdUCw5k8Mr5m/Vib88rdbWVoIeAIAexZksR5hB31NvHMPBeAAA2BhBDwCAjRH0AADYGEEPAICNEfQAANgYQQ8AgI0R9AAA2BhBDwCAjRH0AADYGEEPAICNEfQAANgYQQ8AgI0R9AAA2BhBDwCAjRH0AADYGEEPAICNEfQAANgYQQ8AQAzs3r1b06ZNU05OjhwOh7Zu3drptj/4wQ/kcDhUVVVlehyCHgCAGGhqatKoUaO0evXqK263ZcsW7d27Vzk5OSGNkxjSqwAAQFiKiopUVFR0xW0++OADPfDAA9q+fbumTp0a0jgEPQAAfbOlxJTwapxrsaaXfwoEArrrrrv04IMPKj8/P+Q6BD0AABbyer1Byy6XSy6Xy3SdlStXKjExUfPmzQurHz6jBwDAQrm5uXK73e2PyspK0zXq6ur06KOPasOGDXI4HGH1wx49AAAWamhoUEZGRvtyKHvz//mf/6lTp04pLy+vfZ3f79e///u/q6qqSu+99163axH0AABYKCMjIyjoQ3HXXXepsLAwaN2UKVN01113afbs2aZqEfQAAMRAY2Ojjh492r587Ngx1dfXq1+/fsrLy1P//v2Dtk9KSlJ2drauv/56U+MQ9AAAxEBtba0mTZrUvlxWViZJKi4u1oYNGywbh6AHACAGJk6cKMMwur29mc/lL8ZR9wAA2BhBDwCAjRH0AADYGEEPAICNEfQAANgYQQ8AgI0R9AAA2BhBDwCAjRH0AADYGEEPAICNEfQAANgYQQ8AgI0R9AAA2BhBDwCAjRH0AADYGPejBwBc9RL6ZMuRlBpWDaPtrEXdWIs9egAAbIygBwDAxgh6AABsjKAHAMDGCHoAAGyMoAcAwMYIegAAbIygBwDAxgh6AABsjKAHAMDGCHoAAGyMoAcAwMYIegAAbIygBwDAxgh6AABsjKAHAMDGCHoAAGyMoAcAwMYIegAAbIygBwDAxgh6AABsjKAHAMDGCHoAwFWvd5/eSu+bHtajd5/epsbcvXu3pk2bppycHDkcDm3durX979ra2rRgwQKNGDFCaWlpysnJ0d13361//OMfpv9tBD0AADHQ1NSkUaNGafXq1Zf9XXNzs/bv369FixZp//79evHFF3Xo0CF985vfND1OohXNAgAAc4qKilRUVNTh37ndbu3YsSNo3eOPP65x48bp/fffV15eXrfHieoe/erVqzVkyBClpKSooKBA+/btC7vmld76CEdlZaVuvvlmpaenKzMzUzNmzNChQ4csqb1mzRqNHDlSGRkZysjI0Pjx4/Xqq69aUvtiK1askMPhUGlpadi1lixZIofDEfQYNmxY+E1K+uCDD/Td735X/fv3V2pqqkaMGKHa2tqw6w4ZMuSynh0Oh0pKSsKu7ff7tWjRIg0dOlSpqam69tpr9fDDD8swjLBrh4M5Fr35JVk3xyI5v6T4m2M9dX7FmsfjkcPhUJ8+fUy9LmpB//zzz6usrEwVFRXav3+/Ro0apSlTpujUqVNh1b3SWx/h2LVrl0pKSrR3717t2LFDbW1tmjx5spqamsKuPWjQIK1YsUJ1dXWqra3V1772NU2fPl1//vOfLej8vJqaGq1bt04jR460rGZ+fr4+/PDD9seePXvCrvnpp59qwoQJSkpK0quvvqq//OUv+uUvf6m+ffuGXbumpiao3wu/Hc+cOTPs2itXrtSaNWv0+OOP67/+67+0cuVK/eIXv9Bjjz0Wdu1QMcfOi8b8kqyfY5GYX1J8zrGeOL/M8Hq9QQ+fzxd2zZaWFi1YsEB33nmnMjIyzL3YiJJx48YZJSUl7ct+v9/IyckxKisrLRtDkrFlyxbL6l3s1KlThiRj165dEanft29f49e//rUltc6cOWNcd911xo4dO4yvfvWrxvz588OuWVFRYYwaNSrsOpdasGCBceutt1petyPz5883rr32WiMQCIRda+rUqcacOXOC1v3Lv/yLMWvWrLBrh4o51jkr55dhWD/HIjW/DCM+51g055fH4zEkGQPuftrI+rcXwnoMuPtpQ9Jlj4qKii77uNLcam1tNaZNm2aMHj3a8Hg8pv+NUdmjb21tVV1dnQoLC9vXJSQkqLCwUG+++WY0Wgibx+ORJPXr18/Sun6/X5s2bVJTU5PGjx9vSc2SkhJNnTo16OtthSNHjignJ0ef//znNWvWLL3//vth13z55Zc1duxYzZw5U5mZmRo9erSefPJJC7oN1traqmeeeUZz5syRw+EIu94tt9yi6upqHT58WJL0zjvvaM+ePZ1+3hZpzLGORWJ+SZGZY5GYX1J8zrGeNr/MamhokMfjaX+Ul5eHXKutrU3f+c53dPz4ce3YscP83ryidDDexx9/LL/fr6ysrKD1WVlZevfdd6PRQlgCgYBKS0s1YcIEDR8+3JKaBw4c0Pjx49XS0qLevXtry5YtuuGGG8Kuu2nTJu3fv181NTUWdPmZgoICbdiwQddff70+/PBDLV26VF/+8pd18OBBpaenh1z3b3/7m9asWaOysjL99Kc/VU1NjebNm6fk5GQVFxdb1v/WrVt1+vRp3XPPPZbUW7hwobxer4YNGyan0ym/369ly5Zp1qxZltQ3izkWLFLzS4rMHIvU/JLic471tPll1oXjQ8J1IeSPHDmi119/Xf379w+pDkfdd0NJSYkOHjxo2WdmknT99dervr5eHo9Hv/vd71RcXKxdu3aF9cOooaFB8+fP144dO5SSkmJZr5KCfpMeOXKkCgoKNHjwYG3evFnf+973Qq4bCAQ0duxYLV++XJI0evRoHTx4UGvXrrX0h
9BTTz2loqIi5eTkWFJv8+bNevbZZ7Vx40bl5+ervr5epaWlysnJsbTvq4XVcywS80uK3ByL1PyS4nOOXS3zq7GxUUePHm1fPnbsmOrr69WvXz8NHDhQ3/72t7V//3698sor8vv9OnHihKTz73olJyd3e5yoBP0111wjp9OpkydPBq0/efKksrOzo9FCyObOnatXXnlFu3fv1qBBgyyrm5ycrC984QuSpDFjxqimpkaPPvqo1q1bF3LNuro6nTp1SjfddFP7Or/fr927d+vxxx+Xz+eT0+kMu3dJ6tOnj774xS8GfZOGYuDAgZf98P3Sl76k3//+92HVvdjx48f12muv6cUXX7Ss5oMPPqiFCxfqjjvukCSNGDFCx48fV2VlZUx+EDHHgkVifknRm2NWzS8pPudYT5tfkVJbW6tJkya1L5eVlUmSiouLtWTJEr388suSpBtvvDHoda+//romTpzY7XGi8hl9cnKyxowZo+rq6vZ1gUBA1dXVln5uZiXDMDR37lxt2bJFf/rTnzR06NCIjhcIBMI+MvO2227TgQMHVF9f3/4YO3asZs2apfr6estCXjr/m+hf//pXDRw4MKw6EyZMuOyUqsOHD2vw4MFh1b3Y+vXrlZmZqalTp1pWs7m5WQkJwdPH6XQqEAhYNoYZzLErs2J+SdGbY1bNLyk+51hPm1+RMnHiRBmGcdljw4YNGjJkSId/ZxiGqZCXovjWfVlZmYqLizV27FiNGzdOVVVVampq0uzZs8Oqe6W3PsxcUOBSJSUl2rhxo1566SWlp6e3v2XidruVmpoaVs/l5eUqKipSXl6ezpw5o40bN2rnzp3avn17WHXT09Mv+3wzLS1N/fv3D/tzzx//+MeaNm2aBg8erH/84x+qqKiQ0+nUnXfeGVbdH/3oR7rlllu0fPlyfec739G+ffv0xBNP6Iknngir7gWBQEDr169XcXGxEhOt+3afNm2ali1bpry8POXn5+vtt9/WqlWrNGfOHMvGMIs5dl6k5pcUuTkWqfklxecc64nzK66ZPk4/DI899piRl5dnJCcnG+PGjTP27t0bds3XX3+9w9MZiouLw6rbUU1Jxvr168Puec6cOcbgwYON5ORkY8CAAcZtt91m/PGPfwy7bkesOr3u9ttvNwYOHGgkJycbn/vc54zbb7/dOHr0aPgNGobxhz/8wRg+fLjhcrmMYcOGGU888YQldQ3DMLZv325IMg4dOmRZTcMwDK/Xa8yfP9/Iy8szUlJSjM9//vPGz372M8Pn81k6jlnMsejOL8OwZo5Fcn4ZRvzNsWjOr0icXhfKKXCR5DCMq/xSQwCAq5bX65Xb7daAu59WQnKvsGoFWpv10W+L5fF4LDnq3irc1AYAABsj6AEAsDGCHgAAGyPoAQCwMYIeAAAbI+gBALAxgh4AABsLKehXr16tIUOGKCUlRQUFBdq3b1+3X+vz+bRkyRJLLkcZjbqRrE3P0akdjz2HKh7/rfQcndr0fPUyfcGc559/XnfffbfWrl2rgoICVVVV6YUXXtChQ4eUmZnZ5esvXJzA6gsKRKpuJGvTc3Rqx2PPPbEf/n8iXzeSten5ymPY+YI5pi9MvGrVKt17773t189eu3at/uM//kO/+c1vtHDhQssbBAAg0tL7pMnpSgurht/n0EcW9WMlU2/dt7a2qq6uToWFhZ8VSEhQYWGh3nzzTcubAwAA4TG1R//xxx/L7/crKysraH1WVpbefffdDl/j8/mCPl85ffq0JMnj8Zhs9cq8Xm/Qn/FQm56jUzuSPV/4Po7V7TOjNb+k+Pz/oefI141k7VjPL7uI+G1qKysrtXTp0svWh3N7yyvJzc2NSN1I1qbn6NSOZM///d//rT59+kSsfmeiPb+k+Pz/oefI141k7VjNL7swFfTXXHONnE6nTp48GbT+5MmTys7O7vA15eXlKisra1/2eDzKy8vT0dq3lZ6Z1eFreqIzLW2xbqH7mps14MYvSZLe23tARmp4nztFW1PruVi30G1NjY2aPOkm9e/fPybjdza/ntr0R7n6XxOTnkJxqqk11i10W2JLs4rv+JokaeWTf9S5lJQYd2TOpMGx+V4NRVNTo75dVBCz+WUXpoI+OTlZY8aMUXV1tWbMmCHp/Fsq1dXVmjt3boevcblccrlcl61Pz8xSxsCB5juOlbNxFPRNTbpwvGfvzGwZafEV9EZL/AR9IPX8W5UOhyMm43c2v1z9r5Hrmvj5RTopNX5On0o829w+v5z9rlEgJbwjtaMtNXNArFvotkBjbOeXXZh+676srEzFxcUaO3asxo0bp6qqKjU1NbUfhQ8AAHoO00F/++2366OPPtLixYt14sQJ3Xjjjdq2bdtlB+gBAIDYC+lgvLlz53b6Vj0AAOg5uNY9AAA2RtADAGBjBD0AADZG0AMAYGMEPQAANkbQAwBgYwQ9AAA2RtADAGBjBD0AADZG0AMAYGMEPQAANkbQAwBgY6aDfvfu3Zo2bZpycnLkcDi0devWCLQFAACsYDrom5qaNGrUKK1evToS/QAAAAuZvk1tUVGRioqKItELAAAx0bdvqhJTUsOqca4lYFE31grpfvRm+Hw++Xy+9mWv1xvpIYGrBvMLQFcifjBeZWWl3G53+yM3NzfSQwJXDeYXgK5EPOjLy8vl8XjaHw0NDZEeErhqML8AdCXiQe9yuZSRkRH0AGAN5hcQv7o6i80wDC1evFgDBw5UamqqCgsLdeTIEdPjcB49AAAx0NVZbL/4xS/0q1/9SmvXrtVbb72ltLQ0TZkyRS0tLabGMX0wXmNjo44ePdq+fOzYMdXX16tfv37Ky8szWw4AgKvSlc5iMwxDVVVV+vnPf67p06dLkn77298qKytLW7du1R133NHtcUzv0dfW1mr06NEaPXq0JKmsrEyjR4/W4sWLzZYCAMB2vF5v0OPiM2O669ixYzpx4oQKCwvb17ndbhUUFOjNN980Vcv0Hv3EiRNlGIbZlwEAcFW49OyXiooKLVmyxFSNEydOSJKysrKC1mdlZbX/XXdF/Dx6AACuJg0NDUEHxrpcrhh2w8F4AABY6tIzYUIJ+uzsbEnSyZMng9afPHmy/e+6i6AHAKCHGTp0qLKzs1VdXd2+zuv16q233tL48eNN1eKtewAAYqCrs9hKS0v1yCOP6LrrrtPQoUO1aNEi5eTkaMaMGabGIegBAIiB2tpaTZo0qX25rKxMklRcXKwNGzboJz/5iZqamnTffffp9OnTuvXWW7Vt2zalpKSYGoegBwAgBro6i83hcOihhx7SQw89FNY4fEYPAICNEfQAANgYQQ8AgI0R9AAA2BhBDwCAjZkK+srKSt18881KT09XZmamZsyYoUOHDkWqNwAAECZTQb9r1y6VlJRo79692rFjh9ra2jR58mQ1NTVFqj8AABAGU+fRb9u2LWh5w4YNyszMVF1dnb7yla9Y2hgAAAhfWBfM8Xg8kqR+/fpZ0kxPFJD0saQzklIlOWLbTrdc3GOzpHi4qbAh6ew/n/sd8fF1lqSWeGkUwFUr5KAPBAIqLS3VhAkT
NHz48E638/l88vl87cter1eStP+9T5XaHNtb93XHpwnSt67rJ6UnxbqVbuuVkKQLH6YUZKaqOS01pv3Ymje2v0Z1Nr821X2ogLstVm2Z9snps11v1EOktJ7VD/75/KXdx3U22dzlSGPthMfX9UY9RGtzY9TG6tcnRUmp4f2sbDvrt6gba4V81H1JSYkOHjyoTZs2XXG7yspKud3u9kdubm6oQwK4BPMLQFdC2qOfO3euXnnlFe3evVuDBg264rbl5eXtF+qXzu9xxNMPI9dFO2xPHjihlCtcl7incDY3tz9f/tZxnUvtFcNuuseX4NDPCgZLkh547YiS/D3/6yxJvqZGrY7h+PE+vwBEnqmgNwxDDzzwgLZs2aKdO3dq6NChXb7G5XLJ5er5b9F35uKPYFMMQymBnh9Azot+GXEFDDnjoOeLJfkNJcdJ0Bsx/trG+/wCEHmmgr6kpEQbN27USy+9pPT0dJ04cUKS5Ha7lRrmZxsAAMB6pj6jX7NmjTwejyZOnKiBAwe2P55//vlI9QcAAMJg+q17AAAQP7jWPQAANkbQAwBgYwQ9AAA2RtADAGBjBD0AADZG0AMAYGMEPQAANkbQAwBgYwQ9AAA2RtADAGBjBD0AADZG0AMAYGOm7143cuRIZWRkKCMjQ+PHj9err74aqd4AAECYTAX9oEGDtGLFCtXV1am2tlZf+9rXNH36dP35z3+OVH8AACAMpm5TO23atKDlZcuWac2aNdq7d6/y8/MtbQwAAITPVNBfzO/364UXXlBTU5PGjx/f6XY+n08+n6992ev1hjokgEswvwB0xXTQHzhwQOPHj1dLS4t69+6tLVu26IYbbuh0+8rKSi1dujSsJgF0jPkFWGNARqqSe6WGVaM1yW9RN9YyfdT99ddfr/r6er311lv64Q9/qOLiYv3lL3/pdPvy8nJ5PJ72R0NDQ1gNA/gM8wtAV0zv0ScnJ+sLX/iCJGnMmDGqqanRo48+qnXr1nW4vcvlksvlCq9LAB1ifgHoStjn0QcCgaDPCAEAQM9hao++vLxcRUVFysvL05kzZ7Rx40bt3LlT27dvj1R/AAAgDKaC/tSpU7r77rv14Ycfyu12a+TIkdq+fbu+/vWvR6o/AAAQBlNB/9RTT0WqDwAArhp+v19LlizRM888oxMnTignJ0f33HOPfv7zn8vhcFg6Vsjn0QMAgNCsXLlSa9as0dNPP638/HzV1tZq9uzZcrvdmjdvnqVjEfQAAETZG2+8oenTp2vq1KmSpCFDhui5557Tvn37LB+Lu9cBAGAhr9cb9OjozLRbbrlF1dXVOnz4sCTpnXfe0Z49e1RUVGR5P+zRAwBgodzc3KDliooKLVmyJGjdwoUL5fV6NWzYMDmdTvn9fi1btkyzZs2yvB+CHgAACzU0NCgjI6N9uaOLWm3evFnPPvusNm7cqPz8fNXX16u0tFQ5OTkqLi62tB+CHgAAC2VkZAQFfUcefPBBLVy4UHfccYckacSIETp+/LgqKystD3o+owcAIMqam5uVkBAcwU6nU4FAwPKx2KMHACDKpk2bpmXLlikvL0/5+fl6++23tWrVKs2ZM8fysQh6AACi7LHHHtOiRYt0//3369SpU8rJydH3v/99LV682PKxCHoAAKIsPT1dVVVVqqqqivhYfEYPAICNhRX0K1askMPhUGlpqUXtAAAAK4Uc9DU1NVq3bp1GjhxpZT8AAMBCIQV9Y2OjZs2apSeffFJ9+/a1uicAAGCRkA7GKykp0dSpU1VYWKhHHnkkpIETWs4qobkppNdGk9Mh9Wo6f1UjZ3OznIYR44665jzb3P488aLnPZk/4bPbMia1nFWS3/pzSSMh0NIzv74u31kFfGdj3Ua3pbTGU68t7c9T21qusGXPlNxDv2c7FEffwz2Z6aDftGmT9u/fr5qamm5t7/P5gi7o7/V6JUn/Y/JYXfm6QT1Hz/91pHP/9s1xsW6hW5p69VJp0/mv9KL/fYvSmuPjh5FX0i9iOH5n8+s3Fd+Om/kVz/b+nztj3YKteSVtiHUTNmAq6BsaGjR//nzt2LFDKSkp3XpNZWWlli5dGlJzAK6M+QVYIzvDJVevy69Jb4Yvsc2ibqzlMIzuvxe9detWfetb35LT6Wxf5/f75XA4lJCQIJ/PF/R3Usd7HLm5ubpv1R+kPtdY8E+IrHNOhzb963BJ0m0b9ivxXM9/S9nlO6vnFpy/1eH//NkWtSR375eyWPInObWn/OuSpOE/+YMSWv0x7qh7/L5m/XntLHk8ni6vbR0Jnc2vz02uVEuqO+r9hMr49GSsW+i2Xv5WNfy/SklSbv5sNSckxbgjc9w3fTnWLXRboPWsjj93X0Tnl9frldvtVtkLdXL16h1WLV9zo1bNHBOznwedMbVHf9ttt+nAgQNB62bPnq1hw4ZpwYIFl4W8dP6uPR3duafVlSql9DLZbvS1OR1qTkuTJPlcqfI7e37QX6wlOUUtyamxbqNL/qTPjgs9m5SiBMVJ0Bux7bOz+dWcmKyWxPD2TqLJcCbHuoWQNCckqdkZX0GflNTzf/G/IGDE18/bnspU0Kenp2v48OFB69LS0tS/f//L1gMAgNjjyngAANhY2Ne637lzpwVtAACASGCPHgAAGyPoAQCwMYIeAAAbI+gBALAxgh4AABsj6AEAsDGCHgAAGyPoAQCwMYIeAAAbI+gBALAxgh4AABsj6AEAsDFTQb9kyRI5HI6gx7BhwyLVGwAACJPpu9fl5+frtdde+6xAYtg3wAMAABFiOqUTExOVnZ0diV4AAIDFTAf9kSNHlJOTo5SUFI0fP16VlZXKy8vrdHufzyefz9e+7PV6Q+sUwGWYXwC6Yuoz+oKCAm3YsEHbtm3TmjVrdOzYMX35y1/WmTNnOn1NZWWl3G53+yM3NzfspgGcx/wC0BVTe/RFRUXtz0eOHKmCggINHjxYmzdv1ve+970OX1NeXq6ysrL2Za/Xyw8jwCLML8AaAzOSlJqWHFaNs4lJFnVjrbCOpOvTp4+++MUv6ujRo51u43K55HK5whkGQCeYXwC6EtZ59I2NjfrrX/+qgQMHWtUPAACwkKmg//GPf6xdu3bpvffe0xtvvKFvfetbcjqduvPOOyPVHwAACIOpt+7//ve/684779Qnn3yiAQMG6NZbb9XevXs1YMCASPUHAADCYCroN23aFKk+AABABHCtewAAbIygBwDAxgh6AABsjKAHACAGPvjgA333u99V//79lZqaqhEjRqi2ttbycbj1HAAAUfbpp59qwoQJmjRpkl599VUNGDBAR44cUd++fS0fi6AHACDKVq5cqdzcXK1fv7593dChQyMyFm/dAwAQZS+//LLGjh2rmTNnKjMzU6NHj9aTTz4ZkbEIegAALOT1eoMeF99K+oK//e1vWrNmja677jpt375dP/zhDzVv3jw9/fTTlvdD0AMAYKHc3Nyg20dXVlZetk0gENBNN92k5cuXa/To0brvvvt07733au3atZb3w2f0AABYqKGhQRkZGe3LHd1hcuDAgbrhhhuC1n3pS1/S73/
/e8v7IegBALBQRkZGUNB3ZMKECTp06FDQusOHD2vw4MGW92P6rftonfcHAIBd/ehHP9LevXu1fPlyHT16VBs3btQTTzyhkpISy8cytUcfzfP+AACwq5tvvllbtmxReXm5HnroIQ0dOlRVVVWaNWuW5WOZCvponvcHAICdfeMb39A3vvGNiI9jKuhffvllTZkyRTNnztSuXbv0uc99Tvfff7/uvffeSPUXc8ZFz88lxsdJCk5/gpp69ZIk+ZOc8if1/L79F31tA3HQ7wVGwBnrFgDgikwF/YXz/srKyvTTn/5UNTU1mjdvnpKTk1VcXNzha3w+X9A5hF6vN7yOo8zvdLQ//9M9N8WwE3N6lzTFuoWQHVtyW6xb6D6vV/pl7IaP9/kFIPJMBX0gENDYsWO1fPlySdLo0aN18OBBrV27ttOgr6ys1NKlSy9b//qev6kl7dMQWo6uQO9k6V+Hx7oNoEOdza+aqm8rPXtgDDoKzZmz52LdQrc5mpqkz5//mr++eakCvdJi3JE5w3LSY91Ct3m9XmX9NtZdxD9TQR/KeX/l5eUqKytrX/Z6vcrNzTXZZgy1+tufuuZslMPX838g9Trn0/uvlEqScm/9qZqdybFtqBsMV5Jat5RKkpInLZSjpTW2DXWTcc6nWHYa9/ML6CEy01zq1fvy893NaI7pT4POmQr6UM77c7lcHV4sIF44Ln7uOxcXQe84d05pzc3nn7e0yeF0dPGKnsXR0ho3QS9/W0yHj/f5BSDyTB31FM3z/gAAQPhMBf2F8/6ee+45DR8+XA8//HDEzvsDAADhM30J3Gid9wcAAMIXPycsAwAA0wh6AABsjKAHAMDGCHoAAGyMoAcAwMYIegAAbIygBwDAxgh6AABsjKAHAMDGCHoAAGyMoAcAwMYIegAAbMxU0A8ZMkQOh+OyB7epBQCgZzJ197qamhr5/f725YMHD+rrX/+6Zs6caXljAAAgfKaCfsCAAUHLK1as0LXXXquvfvWrljYFAACsYfp+9Be0trbqmWeeUVlZmRwOR6fb+Xw++Xy+9mWv1xvqkAAuwfwC0JWQD8bbunWrTp8+rXvuueeK21VWVsrtdrc/cnNzQx0SwCWYXwC6EnLQP/XUUyoqKlJOTs4VtysvL5fH42l/NDQ0hDokgEswvwB0JaS37o8fP67XXntNL774YpfbulwuuVyuUIYB0AXmF2CNAanJSksNby41+X1dbxQDIe3Rr1+/XpmZmZo6darV/QAAAAuZDvpAIKD169eruLhYiYkhH8sHAACiwHTQv/baa3r//fc1Z86cSPQDAAAsZHqXfPLkyTIMIxK9AAAAi3GtewAAbIygBwDAxgh6AABsjKAHAMDGCHoAAGyMoAcAwMYIegAAbIygBwDAxgh6AABibMWKFXI4HCotLbW8NkEPAEAM1dTUaN26dRo5cmRE6hP0AADESGNjo2bNmqUnn3xSffv2jcgYpoLe7/dr0aJFGjp0qFJTU3Xttdfq4Ycf5tr3AAD8k9frDXr4fJ3fp76kpERTp05VYWFhxPoxdVOblStXas2aNXr66aeVn5+v2tpazZ49W263W/PmzYtUjwAAxI3c3Nyg5YqKCi1ZsuSy7TZt2qT9+/erpqYmov2YCvo33nhD06dP19SpUyVJQ4YM0XPPPad9+/ZFpDkAAOJNQ0ODMjIy2pddLleH28yfP187duxQSkpKRPsx9db9Lbfcourqah0+fFiS9M4772jPnj0qKiqKSHMAAMSbjIyMoEdHQV9XV6dTp07ppptuUmJiohITE7Vr1y796le/UmJiovx+v2X9mNqjX7hwobxer4YNGyan0ym/369ly5Zp1qxZnb7G5/MFfT7h8XgkSYG2swq0Xv6P72kMOSWv9/zztrNS27kYd9S1wDmfvP98bpzzyTACMe2nO4w2/2df53M+yd8W4466x/C3nv8zRsepdDa/zjSekbxpMekpFGdaev68usDR1Nw+vxobzyhg4Q/kaPB64+eYqjMXfibY8Diw2267TQcOHAhaN3v2bA0bNkwLFiyQ0+m0bCxTQb9582Y9++yz2rhxo/Lz81VfX6/S0lLl5OSouLi4w9dUVlZq6dKll61v+N380DqOhV+f/6Pzwyl6lhZJ7gsLb6yIYScmuRdLklpj3EYoPvnkE7nd7q43tFhn82v0TcOj3stV6eYvxrqDq0Ks5lckpaena/jw4Hmalpam/v37X7Y+XA7DxK9Kubm5WrhwoUpKStrXPfLII3rmmWf07rvvdviaS/c4Tp8+rcGDB+v999+39D/O6/UqNzf3ss9GenJteo5O7Uj27PF4lJeXp08//VR9+vSxtHZ3RGt+SfH5/0PPka8bydrRmF9er1dut1v/t+6Y0nqH13tTo1f/a8xQeTyekL4OEydO1I033qiqqqqw+riUqT365uZmJSQEf6zvdDoVCHT+1rDL5erw8wm32235N5v02WcjkRCp2vQcndqR7PnSeREt0Z5fUnz+/9Bz5OtGsnY05leGK0m9U5LCquFsC+/1O3fuDOv1nTEV9NOmTdOyZcuUl5en/Px8vf3221q1apXmzJkTkeYAAEB4TAX9Y489pkWLFun+++/XqVOnlJOTo+9///tavHhxpPoDAABhMBX06enpqqqqCuvzA5fLpYqKig7fbgxHpOpGsjY9R6d2PPYcqnj8t9JzdGrT89XL1MF4AADYyYWD8fYc/Lt6p4d3fEHjGa9uHT4o5IPxIoWb2gAAYGMEPQAANkbQAwBgYwQ9AAA2RtADAGBjBD0AADZG0AMAYGMEPQAANkbQAwBgYwQ9AAA2RtADAGBjBD0AADZG0AMAYGMEPQAANkbQAwBgYwQ9AAA2RtADAGBjBD0AADaWGOsGAACItbTURPVODS8SjXM9M1LZowcAwMYIegAAbIygBwDAxgh6AABsjKAHAMDGCHoAAGyMoAcAwMYIegAAbIygBwDAxgh6AABsjKAHAMDGCHoAAGyMoAcAwMYIegAAbIygBwDAxgh6AACirLKyUjfffLPS09OVmZmpGTNm6NChQxEZi6AHACDKdu3apZKSEu3du1c7duxQW1ubJk+erKamJsvHSrS8IgAAuKJt27YFLW/YsEGZmZmqq6vTV77yFUvHIugBALCQ1+sNWna5XHK5XFd8jcfjkST169fP8n546x4AAAvl5ubK7Xa3PyorK6+4fSAQUGlpqSZMmKDhw4db3g979AAAWKihoUEZGRnty13tzZeUlOjgwYPas2dPRPoh6AEAsFBGRkZQ0F/J3Llz9corr2j37t0aNGhQRPoh6AEAV70MV5LSU5LCquFo7f7rDcPQAw88oC1btmjnzp0aOnRoWGNfCUEPAECUlZSUaOPGjXrppZeUnp6uEydOSJLcbrdSU1MtHcthGIZhaUUAAOKE1+uV2+3WX//+idK7+XZ7Z854vbp2UH95PJ4u37p3OBwdrl+/fr3uueeesPq4FHv0AABEWTT3sTm9DgAAGyPoAQCwMYIeAAAbI+gBALAxgh4AABsj6AEAsDGCHgAAGyPoAQCwMYIeAAAbI+
gBALAxgh4AABsj6AEAsDGCHgAAGyPoAQCwMYIeAAAbI+gBALAxgh4AABsj6AEAsDGCHgAAG0uMdQMAAMRa79REpaeGF4lGW8+MVPboAQCwMYIeAAAbI+gBALAxgh4AABsj6AEAsDGCHgAAGyPoAQCwMYIeAAAbI+gBALAxgh4AABsj6AEAsDGCHgAAGyPoAQCwMYIeAAAbI+gBALAxgh4AABsj6AEAsDGCHgAAGyPoAQCIkdWrV2vIkCFKSUlRQUGB9u3bZ/kYBD0AADHw/PPPq6ysTBUVFdq/f79GjRqlKVOm6NSpU5aOQ9ADABADq1at0r333qvZs2frhhtu0Nq1a9WrVy/95je/sXQcgh4AgChrbW1VXV2dCgsL29clJCSosLBQb775pqVjJVpaDQCAOOT1ei2rcWktl8sll8sVtO7jjz+W3+9XVlZW0PqsrCy9++67YfdyMYIeAHDVSk5OVnZ2tq4bmmtJvd69eys3N7hWRUWFlixZYkn9UBD0AICrVkpKio4dO6bW1lZL6hmGIYfDEbTu0r15SbrmmmvkdDp18uTJoPUnT55Udna2Jb1cQNADAK5qKSkpSklJieqYycnJGjNmjKqrqzVjxgxJUiAQUHV1tebOnWvpWAQ9AAAxUFZWpuLiYo0dO1bjxo1TVVWVmpqaNHv2bEvHIegBAIiB22+/XR999JEWL16sEydO6MYbb9S2bdsuO0AvXA7DMAxLKwIAgB6D8+gBALAxgh4AABsj6AEAsDGCHgAAGyPoAQCwMYIeAAAbI+gBALAxgh4AABsj6AEAsDGCHgAAGyPoAQCwMYIeAAAb+/8zm6zXzaNWkgAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "patches_cm = mpl.colors.ListedColormap(plt.cm.Blues(np.linspace(0, 1, n_patches**2)), \"Patches colormap\")\n", + "\n", + "fm = np.zeros((map_size, map_size))\n", + "\n", + "color = 0\n", + "for x in range(n_patches):\n", + " for y in range(n_patches):\n", + " # print(f\"{x*patch_size}:{(x+1)*patch_size}, {y*patch_size}:{(y+1)*patch_size} -> {color}\")\n", + " fm[x*patch_size:(x+1)*patch_size,y*patch_size:(y+1)*patch_size] = color\n", + " color += 1\n", + "\n", + "shifted_fm = np.roll(\n", + " fm, shift=[-shift_size * patch_size, -shift_size * patch_size], axis=[0, 1]\n", + ")\n", + "\n", + "fig, axes = plt.subplots(1, 2, sharex=True, sharey=True)\n", + "for i, data in enumerate([fm, shifted_fm]):\n", + " im = axes[i].matshow(data, cmap=patches_cm, extent=[0, map_size, map_size, 0])\n", + " for j in range(floor(map_size / (window_size * patch_size))):\n", + " axes[i].add_artist(\n", + " lines.Line2D(\n", + " [\n", + " window_size * patch_size * j,\n", + " window_size * patch_size * j\n", + " ],\n", + " [\n", + " 0,\n", + " map_size\n", + " ],\n", + " color=\"Red\"\n", + " )\n", + " )\n", + " axes[i].add_artist(\n", + " lines.Line2D(\n", + " [\n", + " 0,\n", + " map_size\n", + " ],\n", + " [\n", + " window_size * patch_size * j,\n", + " window_size * patch_size * j\n", + " ],\n", + " color=\"Red\"\n", + " )\n", + " )\n", + "\n", + " if i == 0: # Only draw shifted windows boundaries on the first image\n", + " axes[i].add_artist(\n", + " lines.Line2D(\n", + " [\n", + " window_size * patch_size * j + shift_size * patch_size,\n", + " window_size * patch_size * j + shift_size * patch_size\n", + " ],\n", + " [\n", + " 0 + shift_size * patch_size,\n", + " map_size\n", + " ],\n", + " color=\"Cyan\"\n", + " )\n", + " )\n", + " axes[i].add_artist(\n", + " lines.Line2D(\n", + " [\n", + " 0 + shift_size * patch_size,\n", + " map_size\n", + " ],\n", + " [\n", + " window_size * patch_size * j + shift_size * patch_size,\n", + " window_size * patch_size * j + shift_size * patch_size\n", + " ],\n", + " color=\"Cyan\"\n", + " )\n", + " )\n", + "fig.colorbar(im, ax=axes.ravel().tolist())" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We now proceed to build and visualize the mask which will be used during SW-MSA.\n", + "This mask is to prevent patches to pay attention to other patches in the same window which are not really adjacent to each other due to the cyclic shift applied earlier.\n", + "\n", + "The first image depicts, step by step, how patches in each window get a value assigned denoting their origin.\n", + "For example, the top-left-most window only contains patches coming from the same window which gets the value 0 assigned.\n", + "On the other hand, the bottom-right-most windows is made of patches coming from a variety of windows, each getting a different value assigned." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAekAAAGTCAYAAAAMd+owAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAriUlEQVR4nO3df3RU9Z3/8dckmOFXZiRK+CEJUi1YiAElxJNlF1EinizlC22PujY9ZsPWbruJC82ptvmjJJxKg+seT/wqJ7CsgtttBLUb2OMusDHfktTFYAjGBatW+LplioRolRmSwiCZ+/1jy3wZSDCfydzJnZnn45x7PPeeez/vD/BuX3N/zFyXZVmWAACA46SN9AQAAMDACGkAAByKkAYAwKEIaQAAHIqQBgDAoQhpAAAcipAGAMChCGkAAByKkAYAwKHiHtLBYFC1tbUKBoPUopaj6lMrsWqNdH1qJVathGVF4dlnn7WmT59uud1uq7Cw0Dpw4MCQj/X7/ZYky+/3R1PaCLUSq9ZI16dWYtUa6frUSqxaIyEQCFirV6+2cnNzrdGjR1tFRUXWm2++aTSG8Zn0jh07VFVVpZqaGh06dEhz587Vvffeq56enph8aAAAIBl8+9vfVnNzs372s5/p8OHDWrp0qYqLi3XixIkhj2Ec0k899ZQefvhhlZeXa/bs2dq0aZPGjh2r559/3nQoAACS0tmzZ/WLX/xCf/d3f6dFixbp5ptvVm1trW6++WY1NDQMeZxRJkXPnz+vzs5OVVdXh7elpaWpuLhYb7zxxoDHBIPBiPsNp0+fliT5/X6T0lEJBAIR/6WWuYv/TqFQyPZaEv2S6LXoF2qZiFe/nDt3TufPn4/JWJZlyeVyRWxzu91yu90R2y5cuKD+/n6NHj06YvuYMWP0+uuvGxUcshMnTliSrP3790dsf/TRR63CwsIBj6mpqbEksST4cuzYMaP7KNGiX5JjoV9YnNIvZ8+etcbKFbO5jh8//optNTU1A9YuKiqy7rzzTuvEiRPWhQsXrJ/97GdWWlqaNXPmzCHP32VZQ3+f9EcffaQbbrhB+/fvV1FRUXj7Y489ptbWVh04cOCKYy7/pOv3+5Wbmyvfb96RJzNzqKUxQgJnzihn5hydPn1aXq/X9nqp1C+hj31xrZc2Mcf2GvSLfeLdL9ax/7K9RuAPZ3VjaZWt/RIIBOT1elWqccqQ64sPuIrzsvRz9cnn88nj8YS3D3QmLUnHjh3TqlWr1NbWpvT0dN1+++2aOXOmOjs79e677w6pptHl7uuvv17p6ek6depUxPZTp05p8uTJAx4z2OQ9mZkRf0g42+WXd+ySSv0SOjc+rvXS4vj3R7/EXrz7xRo3Jm614tEvGXINO6Qv8ng8Q+qvm266Sa2trerr61MgENCUKVP0wAMP6Etf+tKQaxk9OJaRkaH58+erpaUlvC0UCqmlpSXizBoAAPyPcePGacqUKfrss8+0d+9erVixYsjHGp1JS1JVVZXKyspUUFCgwsJC1dfXq6+vT+Xl5aZDAQCQtPbu3SvLsjRr1iwdPXpUjz76qG655RajvDQO6QceeEAff/yx1q5dq+7ubs2bN0979uzRpEmTTIcCACBp+f1+VVdX63e/+52ysrL0jW98Q+vXr9c111wz5DGMQ1qSKisrVVlZGc2hAACkhPvvv1/333//sMbgBRsAADgUIQ0AgEMR0gAAOBQhDQCAQxHSAAA4FCENAIBDEdIAADgUIQ0AgEMR0gAAOBQhDQCAQxHSAAA4FCENAIBDEdIAADgUIQ0AgEMR0gAAOBQhDQCAQxmHdFtbm5YvX66pU6fK5XJp586dNkwLAAAYh3RfX5/mzp2rjRs32jEfAADwR6NMDygpKVFJSYkdcwEAAJcwDmlTwWBQwWAwvB4IBOwuiQRGv8AE/YJkZ/uDY3V1dfJ6veElJyfH7pJIYPQLTNAvSHa2h3R1dbX8fn948fl8dpdEAqNfYIJ+QbKz/XK32+2W2+22uwySBP0CE/QLkh3fkwYAwKGMz6R7e3t19OjR8PqHH36orq4uZWVlKTc3N6aTAwAglRmH9MGDB3XXXXeF16uqqiRJZWVl2rZtW8wmBgBAqjMO6cWLF8uyLDvmAgAALsE9aQAAHIqQBgDAoQhpAAAcipAGACDG+vv79eMf/1gzZszQmDFjdNNNN+knP/mJ8TNdtv+YCQAAqeaJJ55QQ0ODXnjhBc2ZM0cHDx5UeXm5vF6v/vZv/3bI4xDSAADE2P79+7VixQotW7ZMknTjjTfqxRdf1Jtvvmk0Dpe7AQAwEAgEIpZL38R20Z/8yZ+opaVFv/nNbyRJb7/9tl5//XXjVz1zJg0ASHpFHrfGuIZ3XnrWCmlroPeKt63V1NSotrY2YtuPfvQjBQIB3XLLLUpPT1d/f7/Wr1+v0tJSo5qENAAABnw+nzweT3h9oJe8vPTSS/r5z3+uxsZGzZkzR11dXVqzZo2mTp2qsrKyIdcipAEAMODxeCJCeiCPPvqofvSjH+kv/uIvJEm33nqrfvvb36qurs4opLknDQBAjP3hD39QWlpkxKanpysUChmNw5k0AAAxtnz5cq1fv165ubmaM2eO3nrrLT311FNatWqV0TiENAAAMfbMM8/oxz/+sf7mb/5GPT09mjp1qv76r/9aa9euNRqHkAYAIMYyMzNVX1+v+vr6YY3DPWkAAByKkAYAwKGMQrqurk4LFixQZmamsrOztXLlSr3//vt2zQ0AgJRmFNKtra2qqKhQe3u7mpub9fnnn2vp0qXq6+uza34AAKQsowfH9uzZE7G+bds2ZWdnq7OzU4sWLYrpxAAASHXDerrb7/dLkrKysgbdJxgMRvz4eCAQGE5JJDn6BSboFyS7qEM6FAppzZo1WrhwofLy8gbdr66uTuvWrYu2DFJMKvVLWvb0kZ5CwqNfbBSHeul8qPpCUT/dXVFRoSNHjmj79u1X3a+6ulp+vz+8+Hy+aEsiBdAvMEG/INlFdSZdWVmpV199VW1tbZo2bdpV93W73QO+IQQYCP0CE/QLkp1RSFuWpUceeURNTU3at2+fZsyYYde8AABIeUYhXVFRocbGRu3atUuZmZnq7u6WJHm9Xo0ZM8aWCQIAkKqM7kk3NDTI7/dr8eLFmjJlSnjZsWOHXfMDACBlGV/uBgAA8cFvdwMA4FCENAAADkVIAwDgUIQ0AAAORUgDAOBQhDQAAA5FSAMA4FCENAAADkVIAwDgUIQ0AAAORUgDAOBQhDQAAA5FSAMA4FCENAAADkVIAwDgUIQ0AAAORUgDAOBQR
iHd0NCg/Px8eTweeTweFRUVaffu3XbNDQCAhHXjjTfK5XJdsVRUVAx5jFEmBadNm6YNGzboy1/+sizL0gsvvKAVK1borbfe0pw5c4z/AAAAJKuOjg719/eH148cOaJ77rlH991335DHMArp5cuXR6yvX79eDQ0Nam9vHzSkg8GggsFgeD0QCJiURIqhX2CCfoGTTZw4MWJ9w4YNuummm3TnnXcOeYyo70n39/dr+/bt6uvrU1FR0aD71dXVyev1hpecnJxoSyIF0C8wQb9gJAQCgYjl0g+Kgzl//rz++Z//WatWrZLL5RpyLZdlWZbJ5A4fPqyioiKdO3dO48ePV2Njo/78z/980P0H+qSbk5Mj/8nj8ng8JqUxAgKBgLxTcuX3++Py70W/JDb6BSbi0S+BQEBer1f/J/sGjU8b3rPSvaGQ7u45ccX2mpoa1dbWXvXYl156Sd/85jd1/PhxTZ06dcg1jS53S9KsWbPU1dUlv9+vV155RWVlZWptbdXs2bMH3N/tdsvtdpuWQYqiX2CCfsFI8Pl8ER8qhtKDzz33nEpKSowCWooipDMyMnTzzTdLkubPn6+Ojg49/fTT2rx5s+lQAAAknIvfcBqq3/72t3rttdf0L//yL8a1hv096VAoNKTr8QAApKKtW7cqOztby5YtMz7W6Ey6urpaJSUlys3N1ZkzZ9TY2Kh9+/Zp7969xoUBAEh2oVBIW7duVVlZmUaNMr54bRbSPT09euihh3Ty5El5vV7l5+dr7969uueee4wLAwCQ7F577TUdP35cq1atiup4o5B+7rnnoioCAEAqWrp0qQy/RBWB3+4GAMChCGkAAByKkAYAwKEIaQAAHIqQBgDAoQhpAAAcipAGAMChCGkAAByKkAYAwKEIaQAAHIqQBgDAoQhpAAAcipAGAMChCGkAAByKkAYAwKEIaQAAHGpYIb1hwwa5XC6tWbMmRtMBAAAXRR3SHR0d2rx5s/Lz82M5HwAA8EdRhXRvb69KS0u1ZcsWTZgwIdZzAgAAkkZFc1BFRYWWLVum4uJiPf7441fdNxgMKhgMhtf9fr8kKXDmTDSlEWcX/50sy4pLPfolsdEvMBHvfklExiG9fft2HTp0SB0dHUPav66uTuvWrbtie87MOaalMYJ+//vfy+v12l6HfkkO9AtMxKtfEpHLMvgI4/P5VFBQoObm5vC96MWLF2vevHmqr68f8JjLP+mePn1a06dP1/Hjx23/RwkEAsrJyZHP55PH46FWFPx+v3Jzc/XZZ5/p2muvtbWWRL8kei36hVom4tEvgUBAXq9X/yf7Bo1PG94XmnpDId3dc0J+v9/2v5uLjM6kOzs71dPTo9tvvz28rb+/X21tbXr22WcVDAaVnp4ecYzb7Zbb7b5iLK/XG7c/pMfjodYwpQ2zuYeKfkmOWvQLtUzEq18SkVFIL1myRIcPH47YVl5erltuuUU//OEPrwhoAAAQPaOQzszMVF5eXsS2cePG6brrrrtiOwAAGJ64X2Nwu92qqakZ8BIVtVK71kjXp1Zi1Rrp+tRKrFqJyujBMQAAEkmiPzjG3XoAAByKkAYAwKEIaQAAHIqQBgDABidOnNC3vvUtXXfddRozZoxuvfVWHTx40GiMqH67GwAADO6zzz7TwoULddddd2n37t2aOHGiPvjgA+OXUhHSAADE2BNPPKGcnBxt3bo1vG3GjBnG43C5GwAAA4FAIGK59PfjL/rXf/1XFRQU6L777lN2drZuu+02bdmyxbgW35MGACSti9+T/u/Cr8gzang/XR240K8b33z3iu01NTWqra2N2DZ69GhJUlVVle677z51dHRo9erV2rRpk8rKyoZck8vdAAAYuPwNYQP9YlooFFJBQYF++tOfSpJuu+02HTlyxDik4365OxgMqra2dsDLA9RK7VojXZ9aiVVrpOtTK7FqxdLFN4RdXAYK6SlTpmj27NkR277yla/o+PHjRrWiuty9ceNGPfnkk+ru7tbcuXP1zDPPqLCwcEjHXrz0EI+fVaNWYtUa6frUSqxaI12fWolRy47L3UOZ7ze/+U35fD796le/Cm/7/ve/rwMHDmj//v1Drml8Jr1jxw5VVVWppqZGhw4d0ty5c3Xvvfeqp6fHdCgAAJLS97//fbW3t+unP/2pjh49qsbGRv3DP/yDKioqjMYxDumnnnpKDz/8sMrLyzV79mxt2rRJY8eO1fPPP286FAAASWnBggVqamrSiy++qLy8PP3kJz9RfX29SktLjcYxenDs/Pnz6uzsVHV1dXhbWlqaiouL9cYbbwx4TDAYjLjfcPr0aUmS3+83mmg0AoFAxH+pZe7iv1MoFLK9lkS/JHot+oVaJuLdL/H21a9+VV/96leHNYZRSH/yySfq7+/XpEmTIrZPmjRJ77333oDH1NXVad26dVdsz83NNSk9LDk5OdQapk8//VTXXnut7XXol+SoRb9Qy0S8+iURGT049tFHH+mGG27Q/v37VVRUFN7+2GOPqbW1VQcOHLjimMs/6fr9fuXm5sr3m3fkycwc5vSdJfSxL671rGP/ZXuNwB/O6sbSKp0+fVper9f2evSLfeiXxEa/RFljhB4cixWjM+nrr79e6enpOnXqVMT2U6dOafLkyQMe43a7B3w83ZOZOSJPf9opdG58XOtZ48bErZbL5YpLHfrFPvRLYqNfUpPRg2MZGRmaP3++WlpawttCoZBaWloizqwBAMDwGf/iWFVVlcrKylRQUKDCwkLV19err69P5eXldswPAICUZRzSDzzwgD7++GOtXbtW3d3dmjdvnvbs2XPFw2QAAGB4ovrt7srKSlVWVsZ6LgAA4BK8qhIAAIcipAEAcChCGgAAhyKkAQBwKEIaAACHIqQBAHAoQhoAAIcipAEAcChCGgAAhyKkAQBwKEIaAACHIqQBAHAoQhoAAIcipAEAcChCGgAAhyKkAQBwKEIaAACHMg7ptrY2LV++XFOnTpXL5dLOnTttmBYAADAO6b6+Ps2dO1cbN260Yz4AAOCPRpkeUFJSopKSEjvmAgAALmEc0qaCwaCCwWB4PRAI2F0SCYx+gQn6BcnO9gfH6urq5PV6w0tOTo7dJZHA6BeYoF+Q7GwP6erqavn9/vDi8/nsLokERr/ABP2CZGd7SLvdbnk8nogFGAz9AhP0C5yqtrZWLpcrYrnllluMx7H9njQAAKlozpw5eu2118Lro0aZR67xEb29vTp69Gh4/cMPP1RXV5eysrKUm5trPAEAAJLRqFGjNHny5GGNYXy5++DBg7rtttt02223SZKqqqp02223ae3atcOaCAAAiSAQCEQsl37D4FIffPCBpk6dqi996UsqLS3V8ePHjWsZn0kvXrxYlmUZFwIAYKRk3jpNmRnXDGsM6/zn0pvvXvEtgpqaGtXW1kZsu+OOO7Rt2zbNmjVLJ0+e1Lp16/Rnf/ZnOnLkiDIzM4dck3vSAAAY8Pl8EQ8put3uK/a59Ee/8vPzdccdd2j69Ol66aWX9Fd/9VdDrkVIAwBgIJpvElx77bWaOXNmxDNdQ8FbsAAAsFlv
b6+OHTumKVOmGB1HSAMAEGM/+MEP1Nraqv/+7//W/v379bWvfU3p6el68MEHjcbhcjcAADH2u9/9Tg8++KB+//vfa+LEifrTP/1Ttbe3a+LEiUbjENIAAMTY9u3bYzIOl7sBAHAoQhoAAIcipAEAcChCGgAAhyKkAQBwKEIaAACHIqQBAHAoQhoAAIcipAEAcChCGgAAhzIK6bq6Oi1YsECZmZnKzs7WypUr9f7779s1NwAAUppRSLe2tqqiokLt7e1qbm7W559/rqVLl6qvr8+u+QEAkLKMXrCxZ8+eiPVt27YpOztbnZ2dWrRoUUwnBgBAqhvWW7D8fr8kKSsra9B9gsGggsFgeD0QCEiSLvzTBl0Y7R5OeccZ9e2a+BbMnm57ifQ//nvFSyr1i/Xee3Gtd83fv2h7DfrFPvHul963j9teI3Ch3/YaiS7qB8dCoZDWrFmjhQsXKi8vb9D96urq5PV6w0tOTk60JZEC6BeYoF+Q7KIO6YqKCh05cuQL35lZXV0tv98fXnw+X7QlkQLoF5igX5DsorrcXVlZqVdffVVtbW2aNm3aVfd1u91yu5PrshPsQ7/ABP2CZGcU0pZl6ZFHHlFTU5P27dunGTNm2DUvAABSnlFIV1RUqLGxUbt27VJmZqa6u7slSV6vV2PGjLFlggAApCqje9INDQ3y+/1avHixpkyZEl527Nhh1/wAAEhZxpe7AQBAfPDb3QAAOBQhDQCAQxHSAAA4FCENAIBDEdIAADgUIQ0AgEMR0gAAOBQhDQCAQxHSAAA4FCENAIDNNmzYIJfLpTVr1hgdR0gDAGCjjo4Obd68Wfn5+cbHEtIAANikt7dXpaWl2rJliyZMmGB8PCENAICBQCAQsQSDwUH3raio0LJly1RcXBxVLaO3YAEAkIhcM2fKNcY9vDHOBiU1KycnJ2J7TU2Namtrr9h/+/btOnTokDo6OqKuSUgDAGDA5/PJ4/GE193uK8Pf5/Np9erVam5u1ujRo6OuRUgDAGDA4/FEhPRAOjs71dPTo9tvvz28rb+/X21tbXr22WcVDAaVnp7+hbWM7kk3NDQoPz8/PMGioiLt3r3bZAgAAJLekiVLdPjwYXV1dYWXgoIClZaWqqura0gBLRmeSU+bNk0bNmzQl7/8ZVmWpRdeeEErVqzQW2+9pTlz5kT1BwEAINlkZmYqLy8vYtu4ceN03XXXXbH9aoxCevny5RHr69evV0NDg9rb2wlpAABiLOp70v39/Xr55ZfV19enoqKiQfcLBoMRj6cHAoFoSyIF0C8wQb8gkezbt8/4GOOQPnz4sIqKinTu3DmNHz9eTU1Nmj179qD719XVad26dVcWfuhHGvUFN96ReugXmKBf7GP+sxvm0gMBaUpuHColLuMfM5k1a5a6urp04MABfe9731NZWZl+/etfD7p/dXW1/H5/ePH5fMOaMJIb/QIT9AuSnfGZdEZGhm6++WZJ0vz589XR0aGnn35amzdvHnB/t9s94HfIgIHQLzBBvyDZDftnQUOh0FV/Eg0AAETH6Ey6urpaJSUlys3N1ZkzZ9TY2Kh9+/Zp7969ds0PAICUZRTSPT09euihh3Ty5El5vV7l5+dr7969uueee+yaHwAAKcsopJ977jm75gEAAC7DqyoBAHAoQhoAAIcipAEAcChCGgAAhyKkAQBwKEIaAACHIqQBAHAoQhoAAIcipAEAcChCGgAAhyKkAQBwKEIaAACHIqQBAHAoQhoAAIcipAEAcChCGgAAhxpWSG/YsEEul0tr1qyJ0XQAAMBFUYd0R0eHNm/erPz8/FjOBwAA/FFUId3b26vS0lJt2bJFEyZMiPWcAACApFHRHFRRUaFly5apuLhYjz/++FX3DQaDCgaD4XW/3y9JCpw5E01pxNnFfyfLsuJSj35JbPQLTMS7XxKRcUhv375dhw4dUkdHx5D2r6ur07p1667YnjNzjmlpjKD169frySef1OrVq1VfX29bHfolOdAvMBGvfklERiHt8/m0evVqNTc3a/To0UM6prq6WlVVVfrFL36h7373u1q/fr0effRRPfjgg/r3f/93dXZ2auLEiVFNfjDNzc1qb2/XzJkz9Z3vfEdbtmzR/fffH9MaF33961/XN77xDc2cOVPFxcW6++679cEHH+jAgQMaN25cTGvt3r1b6enpmjRpkhYtWqTKykpt3rxZv/rVr/SVr3wlprUu8vv9ys3N1Y4dO+Ly/AH9Ejv0C/1iIhX6JSFZBpqamixJVnp6eniRZLlcLis9Pd26cOHCoMcWFhZaFRUVlt/vtyRZn332mTV16lSrrq7OZApGLtb6+c9/bluNy2sdO3bMkmS1trbaXsvv91sTJkyw/vEf/9G2WidOnLAkWTt37rTuvPNOa/Xq1bbVuhT9Evta9Ets0S/DF49+ufjn+fSJCuvC/64a1vLpExXhv5t4MXpwbMmSJTp8+LC6urrCS0FBgUpLS9XV1aX09PQBjzt//rw6OztVXFwc3paWlqbi4mK98cYbJlNwvIv3xLKysmyv9corr6ivr09FRUW21fjBD34gSbrrrrtsq3E5+sUe9Evio18SR0NDg/Lz8+XxeOTxeFRUVKTdu3cbj2MU0pmZmcrLy4tYxo0bp+uuu055eXmDHvfJJ5+ov79fkyZNitg+adIkdXd3G0/ayaqrq7Vw4cKr/n0Mx+HDhzV16lRJUlVVlZqamjR79mxbam3fvl1vv/22LWNfDf0SO/QL/WIiFfolXqZNm6YNGzaos7NTBw8e1N13360VK1bonXfeMRonqqe7h8PtdqumpkZutzsutSTpmmuuiUut+fPn691339V//ud/2lZn1qxZevPNN1VfXy+v16uysjK1trbG/H9IF58/+Ld/+ze9+uqrcfn3Ggj9Mjz0i721JPolGk7pFzstX748Yn39+vVqaGhQe3u75swZ+oONLsuy/9n38+fPa+zYsXrllVe0cuXK8PaysjKdPn1au3btsq22y+VSU1NTRF07VFZWateuXWpra9OMGTNsrXWp4uJi3XTTTdq8eXNMx925c6e+9rWvRdzC6O/vl8vlUlpamoLB4KC3N4aLfrEP/RJb9Et04tkvgUBAXq9Xnz5RIc+Y4X0YCJwNKuuHG+Xz+eTxeMLb3W73VT9o9Pf36+WXX1ZZWZneeustow89cTmTzsjI0Pz589XS0hJu5lAopJaWFlVWVsZjCraxLEuPPPKImpqatG/fvrj+D0j6n7/HS78nGisXnz+4VHl5uW655Rb98Ic/tO3/cCX6xU70S2KhX2Jo1q3SuDHDG6PvrCQpJycnYnNNTY1qa2uv2P3w4cMqKirSuXPnNH78+KhuH8TtcndVVZXKyspUUFCgwsJC1dfXq6+vT+Xl5TGv1dvbq6NHj4bXP/zwQ3V1dSkrK0u5ubkxrVVRUaHGxkbt2rVLmZmZ4XtgXq9XY8YMsyEuU11drZKSEuXm5urMmTNqbGzUvn37tHfv3pjWkf7/8weXGsrzB7FCvwwf/UK/mEilfhmugc6kBzJr1ix1dXXJ7/frlVdeie72Qdy
eI7cs65lnnrFyc3OtjIwMq7Cw0Gpvb7elzi9/+UtL0hVLWVlZzGsNVEeStXXr1pjXWrVqlTV9+nQrIyPDmjhxorVkyRLrP/7jP2JeZzDx/EqNZdEvw0W/0C8mkrVfwl/B2rnJutD8wrCWT3duGtZXsJYsWWJ95zvfMTomLvekAQAYCeF70js3yTPMy92BvrPKWvld+f3+iDPpobr77ruVm5urbdu2DfmYuD/dDQBAsovV7QNCGgCAGOvp6dFDDz2kkydPyuv1Kj8/X3v37tU999xjNA4hDQBAjD333HMxGSeq90kDAAD7EdIAADgUIQ0AgEPFPaSDwaBqa2tt+RUbaiV2rZGuT63EqjXS9amVWLUSVVTfk964caOefPJJdXd3a+7cuXrmmWdUWFg4pGMvfmct2u+ZmaBWYtUa6frUSqxaI12fWolRy0nfk46G8Zn0jh07VFVVpZqaGh06dEhz587Vvffeq56eHjvmBwBAyjIO6aeeekoPP/ywysvLNXv2bG3atEljx47V888/b8f8AABIWUbfkz5//rw6OztVXV0d3paWlqbi4mK98cYbAx4TDAYj7jecPn1akuT3+6OYrplAIBDxX2qZu/jvFAqFbK8l0S+JXot+oZaJePdLIjIK6U8++UT9/f2aNGlSxPZJkybpvffeG/CYuro6rVu37ortsX5bzNVc/loxapn79NNPde2119peh35Jjlr0C7VMxKtfEpHRg2MfffSRbrjhBu3fv19FRUXh7Y899phaW1t14MCBK465/JOu3+9Xbm6ufL95R57MzGFO31lCH/viWs869l+21wj84axuLK3S6dOn5fV6ba9Hv9iHfkls9EuUNRL8wTGjM+nrr79e6enpOnXqVMT2U6dOafLkyQMe43a7B3zXpiczc0Se/rRT6Nz4uNazhvsCcwMulysudegX+9AviY1+SU1GD45lZGRo/vz5amlpCW8LhUJqaWmJOLMGAADDZ/yCjaqqKpWVlamgoECFhYWqr69XX1+fysvL7ZgfAAApyzikH3jgAX388cdau3aturu7NW/ePO3Zs+eKh8kAAMDwRPWqysrKSlVWVsZ6LgAA4BK8YAMAAIcipAEAcChCGgAAhyKkAQBwKEIaAACHIqQBAHAoQhoAAIcipAEAcChCGgAAhyKkAQBwKEIaAACHIqQBAHAoQhoAAIcipAEAcChCGgAAhyKkAQCIsbq6Oi1YsECZmZnKzs7WypUr9f777xuPYxzSbW1tWr58uaZOnSqXy6WdO3caFwUAIJm1traqoqJC7e3tam5u1ueff66lS5eqr6/PaJxRpoX7+vo0d+5crVq1Sl//+tdNDwcAIOnt2bMnYn3btm3Kzs5WZ2enFi1aNORxjEO6pKREJSUlpocBAJAUAoFAxLrb7Zbb7b7qMX6/X5KUlZVlVMs4pE0Fg0EFg8Hw+uV/OOBS9AtM0C8YqrSb8pWWOX54Y5zplSTl5OREbK+pqVFtbe2gx4VCIa1Zs0YLFy5UXl6eUU3bQ7qurk7r1q2zuwySBP0CE/QLRoLP55PH4wmvf9FZdEVFhY4cOaLXX3/duJbtT3dXV1fL7/eHF5/PZ3dJJDD6BSboF4wEj8cTsVwtpCsrK/Xqq6/ql7/8paZNm2Zcy/Yz6aFcqwcuol9ggn6BU1mWpUceeURNTU3at2+fZsyYEdU4toc0AACppqKiQo2Njdq1a5cyMzPV3d0tSfJ6vRozZsyQxzEO6d7eXh09ejS8/uGHH6qrq0tZWVnKzc01HQ4AgKTT0NAgSVq8eHHE9q1bt+ov//IvhzyOcUgfPHhQd911V3i9qqpKklRWVqZt27aZDgcAQNKxLCsm4xiH9OLFi2NWHAAADI7f7gYAwKEIaQAAHIqQBgDAoQhpAAAcipAGAMChCGkAAByKkAYAwKEIaQAAHIqQBgDAoQhpAAAcipAGAMChCGkAAByKkAYAwKEIaQAAHIqQBgDAoQhpAAAcyiik6+rqtGDBAmVmZio7O1srV67U+++/b9fcAABIaUYh3draqoqKCrW3t6u5uVmff/65li5dqr6+PrvmBwBAyhplsvOePXsi1rdt26bs7Gx1dnZq0aJFMZ0YAACpziikL+f3+yVJWVlZg+4TDAYVDAbD64FAQJJ04Z826MJo93DKO4713ntxrdf79nHbawQu9Nte41L0i33ol8RGv6SmqB8cC4VCWrNmjRYuXKi8vLxB96urq5PX6w0vOTk50ZZECqBfYIJ+QbKLOqQrKip05MgRbd++/ar7VVdXy+/3hxefzxdtSaQA+gUm6Bcku6gud1dWVurVV19VW1ubpk2bdtV93W633O7kuuwE+9AvMEG/INkZhbRlWXrkkUfU1NSkffv2acaMGXbNCwCAlGcU0hUVFWpsbNSuXbuUmZmp7u5uSZLX69WYMWNsmSAAAKnK6J50Q0OD/H6/Fi9erClTpoSXHTt22DU/AABSlvHlbgAAEB/8djcAAA5FSAMA4FCENAAADkVIAwBgg7a2Ni1fvlxTp06Vy+XSzp07jccgpAEAsEFfX5/mzp2rjRs3Rj3GsF6wAQBAqrn4IpeLBvvlu5KSEpWUlAyrFiENAEh6ruunyeXxDG8M9/+E8+UvcqmpqVFtbe2wxh4MIQ0AgAGfzyfPJYFv5+/HE9IAABjweDwRIW0nHhwDAMChCGkAAByKy90AANigt7dXR48eDa9/+OGH6urqUlZWlnJzc4c0BiENAIANDh48qLvuuiu8XlVVJUkqKyvTtm3bhjQGIQ0AgA0WL1487LdHck8aAACHIqQBAHAoo5BuaGhQfn5++DtiRUVF2r17t11zAwAgpRmF9LRp07RhwwZ1dnbq4MGDuvvuu7VixQq98847ds0PAICUZfTg2PLlyyPW169fr4aGBrW3t2vOnDkxnRgAAKku6qe7+/v79fLLL6uvr09FRUWD7hcMBhUMBsPrl789BLgU/QIT9AuSnXFIHz58WEVFRTp37pzGjx+vpqYmzZ49e9D96+rqtG7duiu2v/33OzU+LbmeW1vwf4/Etd6EONRIDwSkKUP70n0spFK/vH/2XFzrfevk0S/eaZjoF/vEu19eD9hf77yG9/WkVGDcxbNmzVJXV5cOHDig733veyorK9Ovf/3rQfevrq6W3+8PLz6fb1gTRnKjX2CCfkGyMz6TzsjI0M033yxJmj9/vjo6OvT0009r8+bNA+4/2MuwgYHQLzBBvyDZDft6UCgUirgnBAAAYsPoTLq6ulolJSXKzc3VmTNn1NjYqH379mnv3r12zQ8AgJRlFNI9PT166KGHdPLkSXm9XuXn52vv3r2655577JofAAApyyikn3vuObvmAQAALpNc31EAACCJENIAADgUIQ0AgEMR0gAAOBQhDQCAQxHSAAA4FCENAIBDEdIAADgUIQ0AgEMR0gAAOBQhDQCAQxHSAAA4FCENAIBDEdIAADgUIQ0AgEMR0gAAOBQhDQCAQw0rpDds2CCXy6U1a9bEaDoAACSPjRs36sYbb9To0aN1xx136M033zQ6PuqQ7ujo0ObNm5Wfnx
/tEAAAJK0dO3aoqqpKNTU1OnTokObOnat7771XPT09Qx4jqpDu7e1VaWmptmzZogkTJkQzBAAASe2pp57Sww8/rPLycs2ePVubNm3S2LFj9fzzzw95jFHRFK6oqNCyZctUXFysxx9//Kr7BoNBBYPB8Lrf75ckffnQAXk8nmjKO1bgwkjPIPYCF1ySJMuy4lIvlfplVpzrxaM/6Rf7xLtf/lccagQCAW3NyYlLvwTOnInZGIFAIGK72+2W2+2O2Hb+/Hl1dnaquro6vC0tLU3FxcV64403hl7UMvTiiy9aeXl51tmzZy3Lsqw777zTWr169aD719TUWJJYEnw5duyYaatEhX5JjoV+YXFKv5w9e9aaPHlyzOY6fvz4K7bV1NRcUffEiROWJGv//v0R2x999FGrsLBwyPN3WdbQP8L4fD4VFBSoubk5fC968eLFmjdvnurr6wc85vJPuqdPn9b06dN1/Phxeb3eoZaOSiAQUE5Ojnw+n+2fqpO1lt/vV25urj777DNde+21ttaS6JdEr0W/UMtEvPrl3LlzOn/+fEzGsixLLpcrYttAZ9IfffSRbrjhBu3fv19FRUXh7Y899phaW1t14MCBIdUzutzd2dmpnp4e3X777eFt/f39amtr07PPPqtgMKj09PQvnLwkeb3euF2O8ng81BqmtLT4fFuPfkmOWvQLtUzY3S+jR4/W6NGjba1xueuvv17p6ek6depUxPZTp05p8uTJQx7H6G9myZIlOnz4sLq6usJLQUGBSktL1dXVdUVAAwCQijIyMjR//ny1tLSEt4VCIbW0tEScWX8RozPpzMxM5eXlRWwbN26crrvuuiu2AwCQyqqqqlRWVqaCggIVFhaqvr5efX19Ki8vH/IYUT3dPRxut1s1NTUDXqKiVmrXGun61EqsWiNdn1qJVWskPPDAA/r444+1du1adXd3a968edqzZ48mTZo05DGMHhwDAADxw293AwDgUIQ0AAAORUgDAOBQhDQAAA5FSAMA4FCENAAADkVIAwDgUIQ0AAAORUgDAOBQhDQAAA5FSAMA4FD/D6PYq/VzbAjKAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "mask = np.zeros([1, res, res, 1])\n", + "\n", + "h_slices = (\n", + " slice(0, -window_size),\n", + " slice(-window_size, -shift_size),\n", + " slice(-shift_size, None),\n", + ")\n", + "w_slices = (\n", + " slice(0, -window_size),\n", + " slice(-window_size, -shift_size),\n", + " slice(-shift_size, None),\n", + ")\n", + "\n", + "windows_cm = mpl.colors.ListedColormap(\n", + " plt.cm.Reds(np.linspace(0, 1, len(h_slices) * len(w_slices))), \n", + " \"Patches colormap\"\n", + ")\n", + "fig, axes = plt.subplots(\n", + " ceil(len(h_slices) * len(w_slices) / 3), \n", + " 3, \n", + " sharex=True, \n", + " sharey=True\n", + ")\n", + "\n", + "i = 0\n", + "for h_slice in h_slices:\n", + " for w_slice in w_slices:\n", + " mask[:, h_slice, w_slice, :] = i\n", + "\n", + " im = axes[floor(i / 3), i % 3].matshow(\n", + " mask[0, :, :, 0], \n", + " cmap=windows_cm, \n", + " extent=[0, res, res, 0], \n", + " vmin=0, \n", + " vmax=len(windows_cm.colors)\n", + " )\n", + "\n", + " i += 1\n", + "\n", + "fig.colorbar(im, ax=axes.ravel().tolist())" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This mask cannot be used as is, so we need to do some further processing.\n", + "\n", + "The first step is to split it into windows, as can be seen in the first figure.\n", + "\n", + "We must remember that the self-attention mechanism performs the calculation between each pair of patches in a window.\n", + "For pair of patches that were distant prior to cyclic shifting we want the attention value to be negative, so that it will result in a value close to 0 when put through the SoftMax operation.\n", + "This can be achieved by summing the attention matrix with a mask matrix having cells with large negative values for those pairs of cells distant from each other.\n", + "\n", + "Currently, however, (1) our mask does not have the right shape and the (2) right values.\n", + "To fix it, we need to do the following:\n", + "\n", + "1.\n", + " - We flatten each window by concatenating its patches along a single axis, as can be seen in the second figure.\n", + " - We broadcast this flattened vector into a square matrix by repeating its values for each row. We do the same with the transposed vector, turning it into a matrix by repeating its values for each column.\n", + " - We subtract the first matrix to the second: this yields a matrix with zeros where the two subtracted values were identical and other numbers where values were different. 0 denotes a pair of patches which are adjacent and for which the computation of attention is valid.\n", + "2. 
We transform every non-zero value into a big negative value (in this case, -100).\n", + "\n", + "Our final mask can be seen in the third figure.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAeQAAAGlCAYAAAArqoUwAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAApGklEQVR4nO3df1BV953/8dcVy8UIlwZ/s4CaGLVqwIriUHcTjahDLV/9zm7HzbgNxTaz24Ws9s5mktuZCu7GYtrvODjRJTZNdCdbRpu0mB1nlBKywuQbiQihX0xWG7tOQ7MRTLbeK7fxknDv94+ON70B9J7L/fG58HzMnGnvmXPO591dX7y4597LtQUCgYAAAEBCTUr0AAAAgEIGAMAIFDIAAAagkAEAMACFDACAAShkAAAMQCEDAGAAChkAAANQyAAAGIBCBgDAAHEt5EOHDmnevHlKS0vT6tWrde7cuXguH5a2tjaVlZUpOztbNptNJ06cSPRIw9TW1mrVqlXKyMjQzJkztXXrVl26dCnRYw1TX1+v/Px8ORwOORwOFRcX69SpU4ke64727dsnm82mXbt2JXoUy0zPWDLkSyJjsZTM+Yq1uBXy8ePH5XQ6VV1dra6uLhUUFGjTpk3q7++P1whh8Xq9Kigo0KFDhxI9yqhaW1tVWVmp9vZ2NTc365NPPtHGjRvl9XoTPVqInJwc7du3T52dnTp//rweeughbdmyRW+//XaiRxtVR0eHDh8+rPz8/ESPYlkyZCwZ8iWRsVhJ5nzFRSBOioqKApWVlcHHQ0NDgezs7EBtbW28RrBMUqCxsTHRY9xRf39/QFKgtbU10aPc0d133x34yU9+kugxRnTjxo3AfffdF2hubg48+OCDgZ07dyZ6JEuSLWPJkq9AgIxFQ7LnKx7i8gx5cHBQnZ2dKikpCe6bNGmSSkpKdPbs2XiMMK653W5JUlZWVoInGd3Q0JCOHTsmr9er4uLiRI8zosrKSm3evDnk32myIGOxRcbGLpnzFS+T47HIhx9+qKGhIc2aNStk/6xZs3Tx4sV4jDBu+f1+7dq1S2vWrNGyZcsSPc4wPT09Ki4u1s2bN5Wenq7GxkYtWbIk0WMNc+zYMXV1damjoyPRo0SEjMUOGRu7ZM9XvMSlkBE7lZWVunDhgl5//fVEjzKiRYsWqbu7W263Wy+//LLKy8vV2tpq1A+M3t5e7dy5U83NzUpLS0v0ODAMGRsb8hW+uBTy9OnTlZKSor6+vpD9fX19mj17djxGGJeqqqp08uRJtbW1KScnJ9HjjCg1NVULFiyQJBUWFqqjo0MHDhzQ4cOHEzzZZzo7O9Xf368VK1YE9w0NDamtrU0HDx6Uz+dTSkpKAie8MzIWG2Rs7MZDvuIlLq8hp6amqrCwUC0tLcF9fr9fLS0tRr7WYbpAIKCqqio1Njbqtdde0/z58xM9Utj8fr98Pl+ixwixfv169fT0qLu7O7itXLlS27dvV3d3d1L8sCBj0UXGomc85Cte4nbL2ul0qry8XCtXrlRRUZHq6urk9XpVUVERrxHCMjAwoMuXLwcfX7lyRd3d3crKylJeXl4CJ/tMZWWlGhoa9MorrygjI0NXr16VJGVmZmrKlCkJnu4zLpdLpaWlysvL040bN9TQ0KAzZ86oqakp0aOFyMjIGPba4NSpUzVt2jQjXzMcTTJkLBnyJZGxaBov+YqLeL6l+5lnngnk5eUFUlNTA0VFRYH29vZ4Lh+W//iP/whIGraVl5cnerSgkeaTFDhy5EiiRwuxY8eOwNy5cwOpqamBGTNmBNavXx/45S9/meixwpKsH8swPWPJkK9AgIzFWrLmK9ZsgUAgEM9fAAAAwHD8LWsAAAxAIQMAYAAKGQAAA1DIAAAYgEIGAMAAFDIAAAaIeyH7fD7V1NQY9ZdkRsKc0cWc8ZEs8zNndDHnOBHJh5cPHjwYmDt3bsButweKiooCb775Ztjnut3ugKSA2+2OZOm4Yc7oYs7wkS9zMGd0JcuckfJ4PIGdO3cG8vLyAmlpaYHi4uLAuXPnwj7f8jPk48ePy+l0qrq6Wl1dXSooKNCmTZvU398fvd8SgAmKfAHJ69vf/raam5v14osvqqenRxs3blRJSYnef//9sM63XMj79+/Xo48+qoqKCi1ZskTPPvus7rrrLr3wwguWhwcQinwByenjjz/Wz3/+c/3whz/UAw88oAULFqimpkYLFixQfX19WNew9OUSg4OD6uzslMvlCu6bNGmSSkpKdPbs2RHP8fl8Ia8XXL9+XZLkdrutLB13Ho8n5D9NxZzRdevfpd/vj/va5Ms8zBld8crXzZs3NTg4OObrBAIB2Wy2kH12u112u33YsZ9++qmGhoaGfefzlClTwv8ubSv3x99///2ApMAbb7wRsv/xxx8PFBUVjXhOdXX1qH+onY3N1O03v/mNlWhEBflimyhbLPP18ccfB+6SLSpzpqenD9tXXV096trFxcWBBx98MPD+++8HPv3008CLL74YmDRpUmDhwoVhzR7zr190uVxyOp3Bx263W3l5eer99dtyZGTEennAEs+NG8pduFTTpk1L9ChhGS1fv33jl3KkT03gZONH4Df/L9EjjBueP3ysedudMc3X4OCg/qCAtmuqUmW78wmjXUcB/XRgQL29vXI4HMH9Iz07vuXFF1/Ujh079Gd/9mdKSUnRihUr9PDDD6uzszOsNS0V8vTp05WSkqK+vr6Q/X19fZo9e/aI54z29N6RkRHyPxIwyedvU8VDVPOVPlWOjPSYzDnRBKaa8/3H40U88pUq25gK+RaHwxF2V917771qbW2V1+uVx+PRnDlztG3bNt1zzz1hnW/pTV2pqakqLCxUS0tLcJ/f71dLS4uKi4utXArA55AvYHyYOnWq5syZo9///vdqamrSli1bwjrP8i1rp9Op8vJyrVy5UkVFRaqrq5PX61VFRYXloQGEIl9A8mpqalIgENCiRYt0+fJlPf7441q8eHHY+bVcyNu2bdO1a9e0e/duXb16VcuXL9fp06c1a9Ysy8MDCEW+gOTldrvlcrn0u9/9TllZWfrLv/xL7d27V1/4whfCOt8WCAQCMZ4xhMfjUWZmptwfvMdryDCOx+NR5pw8ud3upPz3eStfv/9//5fXkKMk8O5bi
... [base64-encoded PNG data omitted for readability. This output and the two that follow are matplotlib figures produced by the cell's source below: (1) the cyclically shifted mask partitioned into attention windows, (2) each window's mask flattened to a single row, and (3) the resulting per-window attention masks, in which pairs of patches from different regions are set to -100.] ...
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "mask_windows = SwinTransformer.window_partition(mask, window_size) # (n_windows * 1, window_size, window_size, 1)\n", + "fig, axes = plt.subplots(windows_res, windows_res)\n", + "for i in range(windows_res):\n", + " for j in range(windows_res):\n", + " im = axes[i, j].matshow(\n", + " mask_windows.numpy()[i + j + (i * (axes.shape[1] - 1)), :, :, 0], \n", + " cmap=windows_cm, \n", + " extent=[0, res, res, 0], \n", + " vmin=0, \n", + " vmax=len(windows_cm.colors)\n", + " )\n", + "fig.colorbar(im, ax=axes.ravel().tolist())\n", + "\n", + "mask_windows = tf.reshape(mask_windows, [-1, window_size * window_size])\n", + "\n", + "fig, axes = plt.subplots(mask_windows.shape[0])\n", + "for i in range(axes.shape[0]):\n", + " im = axes[i].matshow(\n", + " mask_windows.numpy()[i].reshape((1, -1)), \n", + " cmap=windows_cm,\n", + " extent=[0, res, res, 0], \n", + " vmin=0, \n", + " vmax=len(windows_cm.colors),\n", + " )\n", + "fig.colorbar(im, ax=axes.ravel().tolist())\n", + "\n", + "attn_mask = tf.expand_dims(mask_windows, 1) - tf.expand_dims(mask_windows, 2)\n", + "\n", + "attn_mask = tf.where(attn_mask != 0, -100.0, attn_mask)\n", + "fig, axes = plt.subplots(windows_res, windows_res)\n", + "for i in range(windows_res):\n", + " for j in range(windows_res):\n", + " im = axes[i, j].matshow(\n", + " attn_mask.numpy()[i + j + (i * (axes.shape[1] - 1))],\n", + " extent=[0, res, res, 0],\n", + " cmap=\"Greens\",\n", + " vmin=-100,\n", + " vmax=0\n", + " )\n", + "fig.colorbar(im, ax=axes.ravel().tolist())" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.3" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/pyproject.toml b/pyproject.toml index 07de284..7fd26b9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,3 @@ [build-system] -requires = ["setuptools", "wheel"] +requires = ["setuptools"] build-backend = "setuptools.build_meta" \ No newline at end of file diff --git a/src/swin/model.py b/src/swin/model.py index aaee273..443d638 100644 --- a/src/swin/model.py +++ b/src/swin/model.py @@ -1,21 +1,34 @@ -"""The Swin model definition module.""" +"""The Swin model definition module. + +Attributes: + DEFAULT_DROP_RATE: Default probability of dropping connections in + ``Dropout`` layers. + DEFAULT_DROP_PATH_RATE: Default maximum probability of entirely skipping a + (Shifted) Windows Multi-head Attention computation (Stochastic Depth + computation-skipping technique) during training. + This maximum value is used in the last stage of the network, while + previous stages use linearly spaced values in the + [0 ,``drop_path_rate``] interval. +""" + +import collections.abc import numpy as np import tensorflow as tf import tensorflow_addons as tfa -import collections.abc -from swin.modules import SwinPatchEmbeddings, SwinStage, SwinLinear +from swin.modules import SwinLinear, SwinPatchEmbeddings, SwinStage + +DEFAULT_DROP_RATE: float = 0.0 +DEFAULT_DROP_PATH_RATE: float = 0.1 class Swin(tf.keras.Model): """Swin Transformer Model. 
-    Some assumptions have been made about this model:
+    To stay consistent with the architecture described in the paper, this class
+    assumes the following:
 
-    - ``inputs`` must always be a color image (3 channels).
-    - The size of windows in (Shifted) Windows Multi-head Attention is fixed
-      to 7.
     - The ratio of hidden to output neurons in ``SwinMlp`` layers is fixed
       to 4.
     - A learnable bias is added to ``queries``, ``keys`` and ``values``
@@ -23,57 +36,54 @@ class Swin(tf.keras.Model):
     - ``queries`` and ``keys`` are scaled by a factor of
       ``head_dimension**-0.5``.
     - No dropout is applied to attention heads.
-    - The probability of the Stochastic Depth computation-skipping technique
-      during training is fixed to 0.1.
     - No absolute position information is included in embeddings.
     - ``LayerNormalization`` is applied after building patch embeddings.
 
     Args:
-        inputs: The input to be expected by the model. It must describe a batch
-            of images in the ``channels_last`` format. Images must have height
-            equal to width (they must be square images).
         num_classes: The number of classes to predict. It determines the
             dimension of the output tensor.
-        patch_size: The size of each patch in which images will be divided into.
-        embed_dim: The lenght of embeddings built from patches.
+        patch_size: The size of the patches into which images will be divided,
+            expressed in pixels.
+        window_size: The size of windows in (Shifted) Windows Multi-head
+            Attention layers, expressed in patches per axis.
+        embed_dim: The length of embeddings built from patches.
         depths: The number of ``SwinTransformer`` layers in each stage of the
             network.
         num_heads: The number of (Shifted) Windows Multi-head Attention heads in
            each stage of the network.
        drop_rate: The probability of dropping connections in ``Dropout``
            layers.
+        drop_path_rate: The maximum probability of entirely skipping a (Shifted)
+            Windows Multi-head Attention computation (Stochastic Depth
+            computation-skipping technique) during training.
+            This maximum value is used in the last stage of the network, while
+            previous stages use linearly spaced values in the
+            [0, ``drop_path_rate``] interval.
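+
+    Example:
+        A minimal sketch; the class count and shapes are placeholders, and any
+        square ``tf.float32`` batch with 3 channels works::
+
+            model = Swin(num_classes=10)
+            probs = model(tf.random.uniform((8, 224, 224, 3)))  # builds the model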
""" def __init__( self, - inputs: tf.Tensor, num_classes: int, patch_size: int = 4, + window_size: int = 7, embed_dim: int = 96, depths: collections.abc.Collection[int] = (2, 2, 6, 2), num_heads: collections.abc.Collection[int] = (3, 6, 12, 24), - drop_rate: float = 0.0, + drop_rate: float = DEFAULT_DROP_RATE, + drop_path_rate: float = DEFAULT_DROP_PATH_RATE, **kwargs, ) -> None: super().__init__(**kwargs) - assert inputs.dtype == tf.float32 - assert inputs.shape[1] == inputs.shape[2] and inputs.shape[3] == 3 - - self.input_shape_list = [ - inputs.shape[0], - inputs.shape[1], - inputs.shape[2], - inputs.shape[3], - ] # When returning this model's config, we only need axes' shapes, not the whole input tensor self.num_classes = num_classes self.patch_size = patch_size + self.window_size = window_size self.embed_dim = embed_dim self.depths = depths self.num_layers = len(self.depths) self.num_heads = num_heads self.drop_rate = drop_rate - self.drop_path_rate = 0.1 + self.drop_path_rate = drop_path_rate self.patch_embed = SwinPatchEmbeddings( self.embed_dim, @@ -81,7 +91,6 @@ def __init__( norm_layer=True, name="patches_linear_embedding", ) - self.patch_embed.compute_output_shape(inputs.shape) self.pos_drop = tf.keras.layers.Dropout(rate=self.drop_rate) @@ -90,7 +99,7 @@ def __init__( # These tensor would then get returned as layer parameters through # calls to their get_config() methods, causing problems in the JSON # serialization as the built-in Python library cannot handle this - # type of objects and thus preventing model saving. + # type of objects, thus preventing model saving. drop_depth_rate = [ x for x in np.linspace( @@ -103,10 +112,9 @@ def __init__( self.blocks = tf.keras.Sequential( [ SwinStage( - input_resolution=self.patch_embed.patches_resolution[0] // (2**i), depth=depths[i], num_heads=num_heads[i], - window_size=7, + window_size=self.window_size, mlp_ratio=4.0, drop_p=drop_rate, drop_path_p=drop_depth_rate[sum(depths[:i]) : sum(depths[: i + 1])], @@ -120,20 +128,24 @@ def __init__( self.norm = tf.keras.layers.LayerNormalization( epsilon=1e-5, name="layer_normalization" ) - self.avgpool = tfa.layers.AdaptiveAveragePooling1D( - 1, name="adaptive_average_pooling" + self.avgpool = tfa.layers.AdaptiveAveragePooling2D( + [1, 1], name="adaptive_average_pooling" ) self.flatten = tf.keras.layers.Flatten(name="flatten") self.head = SwinLinear(num_classes, name="classification_head") + def build(self, input_shape: tf.TensorShape) -> None: + assert input_shape.rank == 4 + assert input_shape[1] == input_shape[2] and input_shape[3] == 3 + def call(self, inputs, **kwargs): x = self.patch_embed(inputs, **kwargs) x = self.pos_drop(x, **kwargs) x = self.blocks(x, **kwargs) + x = self.norm(x, **kwargs) x = self.avgpool(x, **kwargs) x = self.flatten(x, **kwargs) - x = self.head(x, **kwargs) x = tf.nn.softmax(x) @@ -141,27 +153,20 @@ def call(self, inputs, **kwargs): def get_config(self) -> dict: config = { - "input_shape_list": self.input_shape_list, "num_classes": self.num_classes, "patch_size": self.patch_size, + "window_size": self.window_size, "embed_dim": self.embed_dim, "depths": self.depths, "num_heads": self.num_heads, "drop_rate": self.drop_rate, + "drop_path_rate": self.drop_path_rate, } return config - @classmethod - def from_config(cls, config: dict) -> "Swin": - # Since we only have the shape of the input, we build a new random tensor. - # Dtype is fixed to tf.float32. 
-        inputs = tf.random.uniform(config.pop("input_shape_list"), dtype=tf.float32)
-
-        return cls(inputs, **config)
-
     def __repr__(self) -> str:
-        return f"{self.__class__.__name__}(patch_size={self.patch_size}, embed_dim={self.embed_dim}, depths={self.depths}, num_heads={self.num_heads}, drop_rate={self.drop_rate})"
+        return f"{self.__class__.__name__}(num_classes={self.num_classes}, patch_size={self.patch_size}, window_size={self.window_size}, embed_dim={self.embed_dim}, depths={self.depths}, num_heads={self.num_heads}, drop_rate={self.drop_rate}, drop_path_rate={self.drop_path_rate})"
 
 
 class SwinT(Swin):
@@ -170,47 +175,53 @@ class SwinT(Swin):
     This version (tiny) uses the following options:
 
     - ``patch_size`` = 4
+    - ``window_size`` = 7
     - ``embed_dim`` = 96
     - ``depths`` = (2, 2, 6, 2)
     - ``num_heads`` = (3, 6, 12, 24)
 
-    Some assumptions have been made about this model:
+    To stay consistent with the architecture described in the paper, this class
+    assumes the following:
 
-    - ``inputs`` must always be a coloured image (3 channels)
-    - The size of windows in (Shifted) Windows Multi-head Attention is fixed
-      to 7.
     - The ratio of hidden to output neurons in ``SwinMlp`` layers is fixed
       to 4.
     - A learnable bias is added to ``queries``, ``keys`` and ``values``
       when computing (Shifted) Window Multi-head Attention.
     - ``queries`` and ``keys`` are scaled by a factor of
       ``head_dimension**-0.5``.
-    - No dropout is applied to Attention heads.
-    - The probability of the Stochastic Depth technique is fixed to 0.1.
+    - No dropout is applied to attention heads.
     - No absolute position information is included in embeddings.
     - ``LayerNormalization`` is applied after building patch embeddings.
 
     Args:
-        inputs: The input to be expected by the model. It must describe a batch
-            of images in the ``channels_last`` format. Images must have height
-            equal to width (they must be square images).
         num_classes: The number of classes to predict. It determines the
             dimension of the output tensor.
         drop_rate: The probability of dropping connections in ``Dropout``
             layers.
+        drop_path_rate: The maximum probability of entirely skipping a (Shifted)
+            Windows Multi-head Attention computation (Stochastic Depth
+            computation-skipping technique) during training.
+            This maximum value is used in the last stage of the network, while
+            previous stages use linearly spaced values in the
+            [0, ``drop_path_rate``] interval.
     """
 
     def __init__(
-        self, inputs: tf.Tensor, num_classes: int, drop_rate: float = 0, **kwargs
+        self,
+        num_classes: int,
+        drop_rate: float = DEFAULT_DROP_RATE,
+        drop_path_rate: float = DEFAULT_DROP_PATH_RATE,
+        **kwargs,
     ) -> None:
         super().__init__(
-            inputs,
             num_classes,
             patch_size=4,
+            window_size=7,
             embed_dim=96,
             depths=(2, 2, 6, 2),
             num_heads=(3, 6, 12, 24),
             drop_rate=drop_rate,
+            drop_path_rate=drop_path_rate,
             **kwargs,
         )
 
 
 class SwinS(Swin):
@@ -221,47 +232,53 @@ class SwinS(Swin):
     This version (small) uses the following options:
 
     - ``patch_size`` = 4
+    - ``window_size`` = 7
     - ``embed_dim`` = 96
     - ``depths`` = (2, 2, 18, 2)
     - ``num_heads`` = (3, 6, 12, 24)
 
-    Some assumptions have been made about this model:
+    To stay consistent with the architecture described in the paper, this class
+    assumes the following:
 
-    - ``inputs`` must always be a coloured image (3 channels)
-    - The size of windows in (Shifted) Windows Multi-head Attention is fixed
-      to 7.
     - The ratio of hidden to output neurons in ``SwinMlp`` layers is fixed
       to 4.
     - A learnable bias is added to ``queries``, ``keys`` and ``values``
       when computing (Shifted) Window Multi-head Attention.
     - ``queries`` and ``keys`` are scaled by a factor of
       ``head_dimension**-0.5``.
-    - No dropout is applied to Attention heads.
-    - The probability of the Stochastic Depth technique is fixed to 0.1.
+    - No dropout is applied to attention heads.
     - No absolute position information is included in embeddings.
     - ``LayerNormalization`` is applied after building patch embeddings.
 
     Args:
-        inputs: The input to be expected by the model. It must describe a batch
-            of images in the ``channels_last`` format. Images must have height
-            equal to width (they must be square images).
         num_classes: The number of classes to predict. It determines the
             dimension of the output tensor.
         drop_rate: The probability of dropping connections in ``Dropout``
             layers.
+        drop_path_rate: The maximum probability of entirely skipping a (Shifted)
+            Windows Multi-head Attention computation (Stochastic Depth
+            computation-skipping technique) during training.
+            This maximum value is used in the last stage of the network, while
+            previous stages use linearly spaced values in the
+            [0, ``drop_path_rate``] interval.
     """
 
     def __init__(
-        self, inputs: tf.Tensor, num_classes: int, drop_rate: float = 0, **kwargs
+        self,
+        num_classes: int,
+        drop_rate: float = DEFAULT_DROP_RATE,
+        drop_path_rate: float = DEFAULT_DROP_PATH_RATE,
+        **kwargs,
    ) -> None:
         super().__init__(
-            inputs,
             num_classes,
             patch_size=4,
+            window_size=7,
             embed_dim=96,
             depths=(2, 2, 18, 2),
             num_heads=(3, 6, 12, 24),
             drop_rate=drop_rate,
+            drop_path_rate=drop_path_rate,
             **kwargs,
         )
 
 
 class SwinB(Swin):
@@ -272,47 +289,53 @@ class SwinB(Swin):
     This version (base) uses the following options:
 
     - ``patch_size`` = 4
+    - ``window_size`` = 7
     - ``embed_dim`` = 128
     - ``depths`` = (2, 2, 18, 2)
     - ``num_heads`` = (4, 8, 16, 32)
 
-    Some assumptions have been made about this model:
+    To stay consistent with the architecture described in the paper, this class
+    assumes the following:
 
-    - ``inputs`` must always be a coloured image (3 channels)
-    - The size of windows in (Shifted) Windows Multi-head Attention is fixed
-      to 7.
     - The ratio of hidden to output neurons in ``SwinMlp`` layers is fixed
       to 4.
     - A learnable bias is added to ``queries``, ``keys`` and ``values``
       when computing (Shifted) Window Multi-head Attention.
     - ``queries`` and ``keys`` are scaled by a factor of
       ``head_dimension**-0.5``.
-    - No dropout is applied to Attention heads.
-    - The probability of the Stochastic Depth technique is fixed to 0.1.
+    - No dropout is applied to attention heads.
     - No absolute position information is included in embeddings.
     - ``LayerNormalization`` is applied after building patch embeddings.
 
     Args:
-        inputs: The input to be expected by the model. It must describe a batch
-            of images in the ``channels_last`` format. Images must have height
-            equal to width (they must be square images).
         num_classes: The number of classes to predict. It determines the
             dimension of the output tensor.
         drop_rate: The probability of dropping connections in ``Dropout``
             layers.
+        drop_path_rate: The maximum probability of entirely skipping a (Shifted)
+            Windows Multi-head Attention computation (Stochastic Depth
+            computation-skipping technique) during training.
+            This maximum value is used in the last stage of the network, while
+            previous stages use linearly spaced values in the
+            [0, ``drop_path_rate``] interval.
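+
+    Example:
+        A minimal sketch; the class count is a placeholder::
+
+            model = SwinB(num_classes=1000)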
""" def __init__( - self, inputs: tf.Tensor, num_classes: int, drop_rate: float = 0, **kwargs + self, + num_classes: int, + drop_rate: float = DEFAULT_DROP_RATE, + drop_path_rate: float = DEFAULT_DROP_PATH_RATE, + **kwargs, ) -> None: super().__init__( - inputs, num_classes, patch_size=4, + window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), drop_rate=drop_rate, + drop_path_rate=drop_path_rate, **kwargs, ) @@ -323,46 +346,52 @@ class SwinL(Swin): This version (large) uses the following options: - ``patch_size`` = 4 + - ``window_size`` = 7 - ``embed_dim`` = 192 - ``depths`` = (2, 2, 18, 2) - ``num_heads`` = (6, 12, 24, 48) - Some assumptions have been made about this model: + To stay consistent with the architecture described in the paper, this class + assumes the following: - - ``inputs`` must always be a coloured image (3 channels) - - The size of windows in (Shifted) Windows Multi-head Attention is fixed - to 7. - The ratio of hidden to output neurons in ``SwinMlp`` layers is fixed to 4. - A learnable bias is added to ``queries``, ``keys`` and ``values`` when computing (Shifted) Window Multi-head Attention. - ``queries`` and ``keys`` are scaled by a factor of ``head_dimension**-0.5``. - - No dropout is applied to Attention heads. - - The probability of the Stochastic Depth technique is fixed to 0.1. + - No dropout is applied to attention heads. - No absolute position information is included in embeddings. - ``LayerNormalization`` is applied after building patch embeddings. Args: - inputs: The input to be expected by the model. It must describe a batch - of images in the ``channels_last`` format. Images must have height - equal to width (they must be square images). num_classes: The number of classes to predict. It determines the dimension of the output tensor. drop_rate: The probability of dropping connections in ``Dropout`` layers. + drop_path_rate: The maximum probability of entirely skipping a (Shifted) + Windows Multi-head Attention computation (Stochastic Depth + computation-skipping technique) during training. + This maximum value is used in the last stage of the network, while + previous stages use linearly spaced values in the + [0 ,``drop_path_rate``] interval. """ def __init__( - self, inputs: tf.Tensor, num_classes: int, drop_rate: float = 0, **kwargs + self, + num_classes: int, + drop_rate: float = DEFAULT_DROP_RATE, + drop_path_rate: float = DEFAULT_DROP_PATH_RATE, + **kwargs, ) -> None: super().__init__( - inputs, num_classes, patch_size=4, + window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), drop_rate=drop_rate, + drop_path_rate=drop_path_rate, **kwargs, ) diff --git a/src/swin/modules.py b/src/swin/modules.py index 0dd2d04..f510c34 100644 --- a/src/swin/modules.py +++ b/src/swin/modules.py @@ -1,6 +1,7 @@ """Modules used by the Swin Transformer.""" import collections.abc + import numpy as np import tensorflow as tf @@ -18,7 +19,7 @@ class SwinLinear(tf.keras.layers.Dense): use_bias: Whether the layer uses a bias vector. """ - def __init__(self, units: int, use_bias=True, **kwargs) -> None: + def __init__(self, units: int, use_bias: bool = True, **kwargs) -> None: super().__init__( units, activation=tf.keras.activations.linear, @@ -38,7 +39,7 @@ class SwinPatchEmbeddings(tf.keras.layers.Layer): Args: embed_dim: Dimension of output embeddings. - patch_size: Size of axes of image patches, expressed in pixels. + patch_size: Height/width of patches, expressed in pixels. 
norm_layer: Whether to apply layer normalization or not. """ @@ -79,25 +80,21 @@ def build(self, input_shape: tf.TensorShape) -> None: ) self.num_patches = self.patches_resolution[0] * self.patches_resolution[1] - self.flatten = tf.keras.layers.Reshape((-1, self.embed_dim)) - def call(self, inputs: tf.Tensor, **kwargs) -> tf.Tensor: """Build embeddings for every patch of the image. Args: - inputs: A batch of images with shape (batch_size, height, width, - channels). + inputs: A batch of images with shape ``(batch_size, height, width, channels)``. + ``height`` and ``width`` must be identical. Returns: - Embeddings, having shape ``(batch_size, num_patches, embed_dim)``. + Embeddings, having shape ``(batch_size, height / patch_size, + width / patch_size, embed_dim)``. """ - x = tf.ensure_shape(inputs, [None, None, None, 3]) - - x = self.proj(x, **kwargs) - x = self.flatten(x, **kwargs) + x = self.proj(inputs, **kwargs) - if self.norm: + if self.norm is not None: x = self.norm(x, **kwargs) return x @@ -125,90 +122,76 @@ class SwinPatchMerging(tf.keras.layers.Layer): patches. """ - def __init__(self, input_resolution: int, **kwargs) -> None: - # NOTE: Changed input_resolution from tuple to int - + def __init__(self, **kwargs) -> None: super().__init__(**kwargs) - assert input_resolution % 2 == 0 - self.input_resolution = input_resolution - self.norm = tf.keras.layers.LayerNormalization(epsilon=1e-5) def build(self, input_shape: tf.TensorShape): - self.reduction = SwinLinear(input_shape[-1] * 2, use_bias=False) + assert input_shape.rank == 4 + assert input_shape[1] == input_shape[2] + assert input_shape[1] % 2 == 0 + + self.reduction = SwinLinear(input_shape[3] * 2, use_bias=False) def call(self, inputs: tf.Tensor, **kwargs) -> tf.Tensor: - """Perform the merging of patches. + """Merge groups of 4 neighbouring patches. - The merge is performed on groups of 4 neighbouring patches. + This layer concatenates the features of groups of 4 neighbouring patches + and projects the concatenation into a space twice the length of the + original feature space. Args: - inputs: Tensor of patches, with shape ``(batch_size, - num_patches, embed_dim)`` with - ``num_patches = input_resolution * input_resolution``. + inputs: Tensor of patches, with shape + ``(batch_size, height_patches, width_patches, embed_dim)``, where + ``height_patches`` must be equal to ``width_patches``. Returns: - Embeddings of merged patches, with shape ``(batch_size, num_patches / 4, 2 * embed_dim)``. + Embeddings of merged patches, with shape ``(batch_size, + height_patches / 2, width_patches / 2, 2 * embed_dim)``.
""" - tf.assert_equal(inputs.dtype, tf.float32, "Inputs must be a tf.float32 tensor.") - x = tf.ensure_shape(inputs, [None, self.input_resolution**2, None]) - - shape = tf.shape(inputs) - batch = shape[0] - channels = shape[2] - - x = tf.reshape( - x, [batch, self.input_resolution, self.input_resolution, channels] - ) - - x0 = x[:, 0::2, 0::2, :] - x1 = x[:, 1::2, 0::2, :] - x2 = x[:, 0::2, 1::2, :] - x3 = x[:, 1::2, 1::2, :] - - x = tf.concat([x0, x1, x2, x3], axis=-1) - x = tf.reshape(x, [batch, -1, 4 * channels]) + x = tf.concat( + [ + inputs[:, 0::2, 0::2, :], + inputs[:, 1::2, 0::2, :], + inputs[:, 0::2, 1::2, :], + inputs[:, 1::2, 1::2, :], + ], + axis=-1, + ) # [batch_size, height_patches / 2, width_patches / 2, 4 * embed_dim] x = self.norm(x, **kwargs) - x = self.reduction(x, **kwargs) + x = self.reduction( + x, **kwargs + ) # [batch_size, height_patches / 2, width_patches / 2, 2 * embed_dim] return x - def get_config(self) -> dict: - config = super().get_config() - config.update({"input_resolution": self.input_resolution}) - return config - def __repr__(self) -> str: - return f"{self.__class__.__name__}(input_resolution={self.input_resolution})" + return f"{self.__class__.__name__}()" class SwinStage(tf.keras.layers.Layer): """Stage of the Swin Network. Args: - input_resolution: The resolution of axes of the input, expressed in - number of patches. - depth: Number of SwinTransformer layers in the stage. - num_heads: Number of attention heads in each SwinTransformer layer. - window_size: The size of windows in which embeddings gets split into, - expressed in numer of patches. + depth: Number of ``SwinTransformer`` layers in the stage. + num_heads: Number of attention heads in each ``SwinTransformer`` layer. + window_size: The size of window axes expressed in patches. mlp_ratio: The ratio between the size of the hidden layer and the size - of the output layer in SwinMlp layers. - drop_p: The probability of dropping connections in a SwinTransformer + of the output layer in ``SwinMlp`` layers. + drop_p: The probability of dropping connections in a ``SwinTransformer`` layer during training. drop_path_p: The proabability of entirely skipping the computation of (Shifted) Windows Multi-head Self Attention during training (Stochastic Depth technique). - downsample: Whether or not to apply downsampling at the end of the - layer. + downsample: Whether or not to apply downsampling through a + ``SwinPatchMerging`` layer at the end of the stage. 
""" def __init__( self, - input_resolution: int, depth: int, num_heads: int, window_size: int, @@ -220,19 +203,17 @@ def __init__( ) -> None: super().__init__(**kwargs) - self.input_resolution = input_resolution self.depth = depth self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.drop_p = drop_p self.drop_path_p = drop_path_p - self.donwsample = downsample + self.downsample = downsample self.core = tf.keras.Sequential( [ SwinTransformer( - resolution=self.input_resolution, num_heads=num_heads, window_size=window_size, shift_size=0 if (i % 2 == 0) else window_size // 2, @@ -246,29 +227,32 @@ def __init__( ] ) - if downsample: - self.downsample_layer = SwinPatchMerging(self.input_resolution) - else: - self.downsample_layer = None + self.downsample_layer = SwinPatchMerging() if downsample else None + + def build(self, input_shape: tf.TensorShape): + assert ( + input_shape.rank == 4 + ) # Must be batch_size, height_patches, width_patches, embed_dim + assert input_shape[1] == input_shape[2] def call(self, inputs: tf.Tensor, **kwargs) -> tf.Tensor: """Apply transformations of the Swin stage to patches. Args: - inputs: The input patches to the Swin stage, having shape `` - (batch_size, num_patches, embed_dim)``. + inputs: The input patches to the Swin stage, having shape + ``(batch_size, height_patches, width_patches, embed_dim)``. + ``height_patches`` must be equal to ``width_patches``. Returns: - Transformed patches with shape ``(batch_size, num_patches / 4, - embed_dim * 2)`` if ``downsample == True`` or ``(batch_size, - num_patches, embed_dim)`` if ``downsample == False``. + Transformed patches with shape ``(batch_size, height_patches / 2, + width_patches / 2, embed_dim * 2)`` if ``downsample == True`` + or ``(batch_size, height_patches, width_patches, embed_dim)`` + if ``downsample == False``. """ - x = tf.ensure_shape(inputs, [None, None, None]) - - x = self.core(x, **kwargs) + x = self.core(inputs, **kwargs) - if self.donwsample: + if self.downsample: x = self.downsample_layer(x, **kwargs) return x @@ -277,28 +261,25 @@ def get_config(self) -> dict: config = super().get_config() config.update( { - "input_resolution": self.input_resolution, "depth": self.depth, "num_heads": self.num_heads, "window_size": self.window_size, "mlp_ratio": self.mlp_ratio, "drop_p": self.drop_p, "drop_path_p": self.drop_path_p, - "downsample": self.donwsample, + "downsample": self.downsample, } ) return config def __repr__(self) -> str: - return f"{self.__class__.__name__}(input_resolution={self.input_resolution}, depth={self.depth}, num_heads={self.num_heads}, window_size={self.window_size}, mlp_ratio={self.mlp_ratio}, drop_p={self.drop_p}, drop_path_p={self.drop_path_p}, downsample={self.donwsample})" + return f"{self.__class__.__name__}(depth={self.depth}, num_heads={self.num_heads}, window_size={self.window_size}, mlp_ratio={self.mlp_ratio}, drop_p={self.drop_p}, drop_path_p={self.drop_path_p}, downsample={self.downsample})" class SwinWindowAttention(tf.keras.layers.Layer): """Swin (Shifted) Window Multi-head Self Attention Layer. Args: - window_size: The size of windows in which embeddings gets divided into, - expressed in patches. num_heads: The number of attention heads. proj_drop_r: The ratio of output weights that randomly get dropped during training. 
@@ -306,42 +287,71 @@ class SwinWindowAttention(tf.keras.layers.Layer): def __init__( self, - window_size: int, num_heads: int, proj_drop_r: float = 0.0, **kwargs, ) -> None: super().__init__(**kwargs) - self.window_size = window_size self.num_heads = num_heads self.proj_drop_r = proj_drop_r - # TODO: Change into TF calls to get rid of numpy - coords_h = range(self.window_size) - coords_w = range(self.window_size) - coords = np.stack(np.meshgrid(coords_h, coords_w, indexing="ij")) - coords_flat = np.reshape(coords, [coords.shape[0], -1]) - relative_coords = coords_flat[:, :, None] - coords_flat[:, None, :] - relative_coords = np.transpose(relative_coords, [1, 2, 0]) - relative_coords[:, :, 0] += self.window_size - 1 - relative_coords[:, :, 1] += self.window_size - 1 - relative_coords[:, :, 0] *= 2 * self.window_size - 1 - relative_position_index = relative_coords.sum(-1) - - self.relative_position_index = tf.Variable( - initial_value=tf.convert_to_tensor(relative_position_index), - trainable=False, - name="relative_position_index", - ) - self.proj_drop = tf.keras.layers.Dropout(self.proj_drop_r) self.softmax = tf.keras.layers.Softmax(-1) + @classmethod + def build_relative_position_index(cls, window_size: int) -> tf.Tensor: + """Build the table of relative position indices. + + This table is used as an index into the relative position bias table. + For each pair of tokens in a window, this table gives the index of the + corresponding entry in the relative position bias table. + + Args: + window_size: The size of windows (expressed in patches) used during + the (S)W-MSA. + + Returns: + A ``Tensor`` with shape ``(window_size**2, window_size**2)`` + representing indices in the relative position bias table for each pair of + patches in the window. + """ + + coords = tf.range(0, window_size) + coords = tf.stack(tf.meshgrid(coords, coords, indexing="ij")) + coords = tf.reshape(coords, [tf.shape(coords)[0], -1]) + + rel_coords = tf.expand_dims(coords, 2) - tf.expand_dims( + coords, 1 + ) # Make values relative + rel_coords = tf.transpose(rel_coords, [1, 2, 0]) + + rel_coords = tf.Variable(rel_coords) + + rel_coords[:, :, 0].assign( + rel_coords[:, :, 0] + window_size - 1 + ) # Add offset to values + rel_coords[:, :, 1].assign(rel_coords[:, :, 1] + window_size - 1) + + rel_coords[:, :, 0].assign( + rel_coords[:, :, 0] * (2 * window_size - 1) + ) # Shift values so indices for different patches do not share the same value + + rel_pos_index = tf.reduce_sum(rel_coords, -1) + + return rel_pos_index + def build(self, input_shape: tf.TensorShape) -> None: - channels = input_shape[-1] + assert input_shape.rank == 5 + assert input_shape[2] == input_shape[3] + assert ( + input_shape[4] % self.num_heads == 0 + ) # embedding dimension must be evenly divisible by the number of attention heads - self.head_dim = channels // self.num_heads + self.window_size = input_shape[2] + embed_dim = input_shape[4] + + self.head_dim = embed_dim // self.num_heads self.scale = self.head_dim**-0.5 # In the paper, sqrt(d) # The official implementation uses a custom function which defaults @@ -359,8 +369,17 @@ def build(self, input_shape: tf.TensorShape) -> None: trainable=True, ) - self.qkv = SwinLinear(channels * 3) - self.proj = SwinLinear(channels) + self.relative_position_index = tf.Variable( + initial_value=tf.reshape( + SwinWindowAttention.build_relative_position_index(self.window_size), + [-1], + ), # Flatten the matrix so it can be used to index the relative_position_bias_table in the forward pass + trainable=False, + name="relative_position_index", + ) +
+ self.qkv = SwinLinear(embed_dim * 3) + self.proj = SwinLinear(embed_dim) def call( self, inputs: tf.Tensor, mask: tf.Tensor | None = None, **kwargs @@ -368,8 +387,8 @@ def call( """Perform (Shifted) Window MSA. Args: - inputs: Embeddings with shape ``(num_windows * batch_size, - window_size * window_size, embed_dim)``. ``embed_dim`` must be + inputs: Embeddings with shape ``(batch_size, num_windows, + window_size, window_size, embed_dim)``. ``embed_dim`` must be exactly divisible by ``num_heads``. mask: Attention mask used used to perform Shifted Window MSA, having shape ``(num_windows, window_size * window_size, window_size * window_size)`` and values {0, -inf}. @@ -379,25 +398,21 @@ input. """ - x = tf.ensure_shape(inputs, [None, self.window_size**2, None]) - shape = tf.shape(inputs) - batch_windows = shape[0] - window_dim = shape[1] - embed_dim = shape[2] - - tf.assert_equal( - embed_dim % self.num_heads, - 0, - "Provided input dimension 3 (embed_dim) is not evenly divisible by the number of attention heads.", - ) + batch_windows = shape[0] * shape[1] + window_dim = shape[2] * shape[3] + embed_dim = shape[4] - qkv = self.qkv(x, **kwargs) + x = tf.reshape(inputs, [batch_windows, window_dim, embed_dim]) + + qkv = self.qkv(x, **kwargs) # [batch_windows, window_dim, 3 * embed_dim] qkv = tf.reshape( qkv, - [batch_windows, window_dim, 3, self.num_heads, embed_dim // self.num_heads], + [batch_windows, window_dim, 3, self.num_heads, self.head_dim], ) - qkv = tf.transpose(qkv, [2, 0, 3, 1, 4]) + qkv = tf.transpose( + qkv, [2, 0, 3, 1, 4] + ) # [3, batch_windows, num_heads, window_dim, head_dim] q = qkv[0] k = qkv[1] @@ -405,40 +420,58 @@ q = q * self.scale - attn = tf.matmul(q, tf.transpose(k, [0, 1, 3, 2])) + attn = tf.matmul( + q, k, transpose_b=True + ) # [batch_windows, num_heads, window_dim, window_dim] - indices = tf.reshape(self.relative_position_index, [-1]) - relative_position_bias = tf.gather(self.relative_position_bias_table, indices) + relative_position_bias = tf.gather( + self.relative_position_bias_table, self.relative_position_index + ) # [window_dim**2, num_heads] relative_position_bias = tf.reshape( relative_position_bias, [window_dim, window_dim, -1] ) - relative_position_bias = tf.transpose(relative_position_bias, [2, 0, 1]) + relative_position_bias = tf.transpose( + relative_position_bias, [2, 0, 1] + ) # [num_heads, window_dim, window_dim] - attn = attn + tf.expand_dims(relative_position_bias, axis=0) + attn = attn + tf.expand_dims( + relative_position_bias, axis=0 + ) # [batch_windows, num_heads, window_dim, window_dim] if mask is not None: - nW = tf.shape(mask)[0] + num_windows = tf.shape(mask)[0] attn = tf.reshape( - attn, [batch_windows // nW, nW, self.num_heads, window_dim, window_dim] - ) + attn, + [ + batch_windows // num_windows, + num_windows, + self.num_heads, + window_dim, + window_dim, + ], + ) # Expand to [batch_size, num_windows, num_heads, window_dim, window_dim] in order to sum the attention mask attn = attn + tf.expand_dims(tf.expand_dims(mask, axis=1), axis=0) - attn = tf.reshape(attn, [-1, self.num_heads, window_dim, window_dim]) + attn = tf.reshape( + attn, [-1, self.num_heads, window_dim, window_dim] + ) # Back to [batch_windows, num_heads, window_dim, window_dim] attn = self.softmax(attn, **kwargs) - x = tf.matmul(attn, v) - x = tf.transpose(x, [0, 2, 1, 3]) - x = tf.reshape(x, [batch_windows, window_dim, embed_dim]) - x = self.proj(x, **kwargs) - x = self.proj_drop(x, **kwargs) + attn = tf.matmul(attn, v) + attn =
tf.transpose(attn, [0, 2, 1, 3]) + attn = tf.reshape(attn, [batch_windows, window_dim, embed_dim]) - return x + attn = self.proj(attn, **kwargs) + attn = self.proj_drop(attn, **kwargs) + + attn = tf.reshape(attn, tf.shape(inputs)) + + return attn def get_config(self) -> dict: config = super().get_config() config.update( { - "window_size": self.window_size, "num_heads": self.num_heads, "proj_drop_r": self.proj_drop_r, } @@ -446,23 +479,40 @@ def get_config(self) -> dict: return config def __repr__(self) -> str: - return f"{self.__class__.__name__}(window_size={self.window_size}, num_heads={self.num_heads}, proj_drop_r={self.proj_drop_r})" + return f"{self.__class__.__name__}(num_heads={self.num_heads}, proj_drop_r={self.proj_drop_r})" class SwinDropPath(tf.keras.layers.Layer): - """Stochastic Depth Layer. + """Stochastic per-sample layer drop. + + This is an implementation of the stochastic depth technique described in the + "Deep Networks with Stochastic Depth" paper by Huang et al. + (https://arxiv.org/pdf/1603.09382.pdf). + + Examples in a batch have a probability of having their values set to 0. + This is useful in conjunction with residual paths, as adding 0 to the + residual connection yields the original example, as if the computations + in the main path never took place. Args: - drop_prob: The probability of entirely skipping the output of the - computation. + drop_prob: The probability of entirely skipping the layer. """ def __init__(self, drop_prob: float = 0.0, **kwargs) -> None: super().__init__(**kwargs) + assert drop_prob >= 0 and drop_prob <= 1 + self.drop_prob = drop_prob self.keep_prob = 1 - self.drop_prob + def build(self, input_shape: tf.TensorShape) -> None: + # We want to get a rank-1 tensor, with tf.rank(inputs) values all set to + # 1 except for the first one, identical to the batch size. + # e.g. [4, 1, 1, 1]. + self.shape = tf.ones([input_shape.rank], dtype=tf.int32) + self.shape = tf.tensor_scatter_nd_update(self.shape, [[0]], [input_shape[0]]) + def call( self, inputs: tf.Tensor, training: tf.Tensor = None, **kwargs ) -> tf.Tensor: @@ -472,8 +522,8 @@ def call( inputs: The input data. The first dimension is assumed to be the ``batch_size``. training: Whether the forward pass is happening at training time - or not. During inference (``training`` = False) ``inputs`` is - returned as-is. + or not. During inference (``training = False``) ``inputs`` is + returned as-is (i.e. no drops). Returns: The input tensor with some values randomly set to 0. @@ -482,21 +532,9 @@ if self.drop_prob == 0 or not training: return inputs - first_axis = tf.expand_dims(tf.shape(inputs)[0], axis=0) - other_axis = tf.repeat( - 1, tf.rank(inputs) - 1 - ) # Rank-1 tensor with (rank(inputs) - 1) axes, all having value 1 - - # We want to get a rank-1 tensor with 1 as the value of all axes except - # for the first one, identical to the batch size - shape = tf.concat( - [first_axis, other_axis], - axis=0, - ) - rand_tensor = tf.constant(self.keep_prob, dtype=inputs.dtype) rand_tensor = rand_tensor + tf.random.uniform( - shape, maxval=1.0, dtype=inputs.dtype + self.shape, maxval=1.0, dtype=inputs.dtype ) rand_tensor = tf.floor(rand_tensor) @@ -535,20 +573,22 @@ def __init__( self.fc2 = SwinLinear(self.out_features) self.drop = tf.keras.layers.Dropout(self.drop_p) + def build(self, input_shape: tf.TensorShape) -> None: + assert input_shape.rank == 4 + def call(self, inputs: tf.Tensor, **kwargs) -> tf.Tensor: """Apply the transformations of the MLP.
Args: - inputs: The input data, having shape ``(batch_size, num_patches, - embed_size)``. + inputs: The input data, having shape + ``(batch_size, height_patches, width_patches, embed_size)``. Returns: - The transformed inputs, with shape ``(batch_size, num_patches, - out_features)``. + The transformed inputs, with shape + ``(batch_size, height_patches, width_patches, out_features)``. """ - x = tf.ensure_shape(inputs, [None, None, None]) - x = self.fc1(x, **kwargs) + x = self.fc1(inputs, **kwargs) x = tf.nn.gelu(x) x = self.drop(x, **kwargs) x = self.fc2(x, **kwargs) @@ -562,6 +602,7 @@ def get_config(self) -> dict: { "hidden_features": self.hidden_features, "out_features": self.out_features, + "drop_p": self.drop_p, } ) return config @@ -574,25 +615,20 @@ class SwinTransformer(tf.keras.layers.Layer): """Swin Transformer Layer. Args: - resolution: The input resolution expressed in number of patches per - axis. Both axis share the same resolution as the orginal image - must be a square. - num_heads: The number of Shifted Window Attention heads. - window_size: The size of windows in which the image gets partitioned - into, expressed in patches. + num_heads: The number of (Shifted) Window Attention heads. + window_size: The size of window axes, expressed in patches. shift_size: The value of shifting applied to windows, expressed in patches. mlp_ratio: The ratio between the size of the hidden layer and the - size of the output layer in SwinMlp. - drop_p: The probability of dropping connections in Dropout layers during - training. + size of the output layer in ``SwinMlp``. + drop_p: The probability of dropping connections in ``Dropout`` layers + during training. drop_path_p: The probability of entirely skipping a (Shifted) Windows Multi-head Self Attention computation during training. """ def __init__( self, - resolution: int, num_heads: int, window_size: int, shift_size: int, @@ -603,7 +639,6 @@ ) -> None: super().__init__(**kwargs) - self.resolution = resolution self.num_heads = num_heads self.window_size = window_size self.shift_size = shift_size @@ -611,82 +646,34 @@ self.drop_p = drop_p self.drop_path_p = drop_path_p - if self.resolution <= self.window_size: - self.shift_size = 0 - self.window_size = self.resolution - - # Resolution must be evenly divisible by the window size or reshape - # operations will not work - assert self.resolution % self.window_size == 0 - - assert 0 <= self.shift_size < self.window_size - self.norm_1 = tf.keras.layers.LayerNormalization(epsilon=1e-5) - self.attention = SwinWindowAttention( - self.window_size, self.num_heads, proj_drop_r=drop_p - ) + self.attention = SwinWindowAttention(self.num_heads, proj_drop_r=drop_p) # When drop_path_p == 0 SwinDropPath simply returns the same value self.drop_path = SwinDropPath(self.drop_path_p) self.norm_2 = tf.keras.layers.LayerNormalization(epsilon=1e-5) - if self.shift_size > 0: - attn_mask = self.build_attn_mask( - self.resolution, - self.window_size, - self.shift_size, - ) - - self.attn_mask = tf.Variable( - initial_value=attn_mask, - trainable=False, - name="attention_mask", - ) - else: - self.attn_mask = None - @classmethod - @tf.function( - input_signature=[ - tf.TensorSpec(shape=[None, None, None, None], dtype=tf.float32), - tf.TensorSpec(shape=[], dtype=tf.int32), - ], - ) def window_partition(cls, patches: tf.Tensor, window_size: tf.Tensor) -> tf.Tensor: """Partition a batch of images into windows. - .. Note:: - - This method may throw warnings due to an excessive number of - retracing operations.
- However, due to it being used in the forward pass of the full - model, keeping it decorated as a ``tf.function`` should still prove - to be beneficial. - Args: - patches: Patch embeddings for a batch of images to partition - into windows, having shape ``(batch_size, num_patches_h, - num_patches_w, embed_dim)``. ``num_patches_h == num_patches_w``. - window_size: The size of each window, expressed in patches. + patches: A batch of patch embeddings to partition into windows, + having shape ``(batch_size, num_patches_h, num_patches_w, + embed_dim)``. + window_size: The size of each window, expressed in patches along + each axis. Returns: - A tensor of windows having shape ``(n * batch_size, window_size, + A tensor of windows having shape ``(batch_size, n, window_size, window_size, embed_dim)``, where ``n`` is the number of resulting windows. """ - x = tf.ensure_shape(patches, [None, None, None, None]) - window_size = tf.ensure_shape(window_size, []) - - shape = tf.shape(x) - tf.assert_equal( - shape[1], - shape[2], - "The number of patches in the height dimension must be equal to the number of patches in the width dimension (patches must be squared).", - ) + shape = tf.shape(patches) windows = tf.reshape( - x, + patches, [ shape[0], shape[1] // window_size, @@ -697,104 +684,52 @@ def window_partition(cls, patches: tf.Tensor, window_size: tf.Tensor) -> tf.Tens ], ) windows = tf.transpose(windows, [0, 1, 3, 2, 4, 5]) - windows = tf.reshape(windows, [-1, window_size, window_size, shape[3]]) + windows = tf.reshape( + windows, [shape[0], -1, window_size, window_size, shape[3]] + ) return windows @classmethod - @tf.function - def window_reverse(cls, windows: tf.Tensor, patch_size: tf.Tensor) -> tf.Tensor: + def window_reverse(cls, windows: tf.Tensor, resolution: tf.Tensor) -> tf.Tensor: """Reverse the partitioning of a batch of patches into windows. + .. Note:: + ``resolution`` is expected to be a multiple of the size of windows. + No checks are performed to ensure this holds. + Args: - windows: Partitioned windows to reverse, with shape ``(batch_size * - num_windows, window_size, window_size, embed_dim)``. - patch_size: Number of patches per axis in the original image. + windows: Partitioned windows to reverse, with shape + ``(batch_size, num_windows, window_size, window_size, embed_dim)``. + resolution: Number of patches per axis in the original feature map. Returns: A tensor of patches of the batch recreated from ``windows``, with - shape ``(batch_size, patch_size, patch_size, embed_dim)``. + shape ``(batch_size, resolution, resolution, embed_dim)``.
""" - x = tf.ensure_shape(windows, [None, None, None, None]) - - tf.assert_equal( - tf.shape(x)[1], - tf.shape(x)[2], - "Dimension 1 and dimension 2 of 'windows' must be identical.", - ) - window_size = tf.shape(x)[1] + shape = tf.shape(windows) - # TODO: simplify - b = tf.cast(tf.shape(x)[0], tf.float64) # Casting to prevent type mismatch - d = patch_size**2 / window_size / tf.cast(window_size, tf.float64) - batch_size = tf.cast(b / d, tf.int32) + batch_size = shape[0] + window_size = shape[2] + embed_dim = shape[4] x = tf.reshape( - x, + windows, [ batch_size, - patch_size // window_size, - patch_size // window_size, + resolution // window_size, + resolution // window_size, window_size, window_size, - -1, + embed_dim, ], ) x = tf.transpose(x, [0, 1, 3, 2, 4, 5]) - x = tf.reshape(x, [batch_size, patch_size, patch_size, -1]) + x = tf.reshape(x, [batch_size, resolution, resolution, embed_dim]) return x - @classmethod - def masked_fill( - cls, tensor: tf.Tensor, mask: tf.Tensor, value: tf.Tensor - ) -> tf.Tensor: - """Fill elements of ``tensor`` with ``value`` where ``mask`` is True. - - This function returns a new tensor having the same values as ``tensor`` - except for those where ``mask`` contained the value True; these values are - replaced with ``value``. - - It mimics ``torch.tensor.masked_fill()``. - - ``mask`` must have identical shape to ``tensor`` and ``value`` must be a - scalar tensor. - ``value`` is cast to the type of ``tensor`` if their types don't match. - - Args: - tensor: The tensor to fill with ``value`` where ``mask`` is True. - mask: The mask to apply to ``tensor``. - value: The value to fill ``tensor`` with. - - Returns: - A copy of ``tensor`` with elements changed to ``value`` where - ``mask`` was ``True``. - """ - - tf.assert_equal( - tf.shape(tensor), - tf.shape(mask), - "The shape of tensor must match the shape of mask.", - ) - tf.assert_equal(tf.rank(value), 0, "'value' must be a scalar tensor.") - - if value.dtype != tensor.dtype: - value = tf.cast(value, tensor.dtype) - - indices = tf.where(mask) - - filled_tensor = tf.tensor_scatter_nd_update( - tensor, - indices, - tf.broadcast_to( - value, - [tf.shape(indices)[0]], - ), - ) - - return filled_tensor - @classmethod def build_attn_mask(cls, size: int, window_size: int, shift_size: int): """Build an attention mask for the Shifted Window MSA. @@ -808,7 +743,10 @@ def build_attn_mask(cls, size: int, window_size: int, shift_size: int): The computed attention mask, with shape ``(num_windows, window_size * window_size, window_size * window_size)``. """ - # TODO: Change mask creation to ditch numpy + # While possible to build the mask only through TensorFlow operations, + # it would result in a much less readable method.Since Numpy is already + # a TensorFlow dependency and this method is only called during this + # layer's initialization, using it to build the mask is fine. mask = np.zeros( [1, size, size, 1], dtype=np.float32 ) # Force type so we get a tf.float32 tensor as the output of this method. @@ -831,19 +769,62 @@ def build_attn_mask(cls, size: int, window_size: int, shift_size: int): mask_windows = SwinTransformer.window_partition( tf.convert_to_tensor(mask), tf.constant(window_size) - ) - mask_windows = tf.reshape(mask_windows, [-1, window_size * window_size]) + ) # mask_windows.shape = [n, window_size, window_size, 1]. + mask_windows = tf.reshape( + mask_windows, [-1, window_size * window_size] + ) # mask_windows.shape = [n, window_size**2], we flatten windows. 
+ + # We need to create a mask which, for each patch in each window, tells + # us if the attention mechanism should be calculated for every other + # patch in the same window. + # This means a mask with shape [n, window_size**2, window_size**2]. + # Subtracting the two expanded mask_windows gives us a tensor with the + # right shape and values equal to zero where two patches are adjacent in + # the original feature map (meaning attention should be calculated). attn_mask = tf.expand_dims(mask_windows, 1) - tf.expand_dims(mask_windows, 2) - attn_mask = SwinTransformer.masked_fill( - attn_mask, attn_mask != 0, tf.constant(-100.0) - ) # TODO: check if -100 can be changed to -math.inf - attn_mask = SwinTransformer.masked_fill( - attn_mask, attn_mask == 0, tf.constant(0.0) - ) + + # We now need to change values != 0 to something negative. When put + # through the SoftMax operation performed during the SW-MSA, it results + # in a value close to 0 for those patches that were not adjacent in the + # original feature map. + # Technically, the more negative the value the better + # (i.e. -math.inf), but it could lead to floating-point shenanigans, so we + # choose -100 to stay consistent with the original implementation. + attn_mask = tf.where(attn_mask != 0, tf.constant(-100.0), attn_mask) return attn_mask def build(self, input_shape: tf.TensorShape) -> None: + assert input_shape.rank == 4 + assert input_shape[1] == input_shape[2] + + self.resolution = input_shape[1] + + if self.resolution <= self.window_size: + self.shift_size = 0 + self.window_size = self.resolution + + # Resolution must be evenly divisible by the window size or reshape + # operations will not work + assert self.resolution % self.window_size == 0 + + assert 0 <= self.shift_size < self.window_size + + if self.shift_size > 0: + attn_mask = self.build_attn_mask( + self.resolution, + self.window_size, + self.shift_size, + ) + + self.attn_mask = tf.Variable( + initial_value=attn_mask, + trainable=False, + name="attention_mask", + ) + else: + self.attn_mask = None + dim = input_shape[-1] mlp_hidden_dim = int(dim * self.mlp_ratio) self.mlp = SwinMlp(mlp_hidden_dim, out_features=dim, drop_p=self.drop_p) @@ -852,57 +833,35 @@ def call(self, inputs: tf.Tensor, **kwargs) -> tf.Tensor: """Apply the transformations of the transformer layer. Args: - inputs: Input embeddings with shape ``(batch_size, num_patches, embed_dim)``. + inputs: Input embeddings with shape + ``(batch_size, height_patches, width_patches, embed_dim)``. + ``height_patches`` must be equal to ``width_patches``. Returns: Transformed embeddings with same shape as ``inputs``.
""" - x = tf.ensure_shape(inputs, [None, self.resolution * self.resolution, None]) + shortcut_1 = inputs - shape = tf.shape(inputs) - - batch = shape[0] - channels = shape[2] - - shortcut_1 = x - - x = self.norm_1(x, **kwargs) - x = tf.reshape(x, [batch, self.resolution, self.resolution, channels]) - shifted_x = x + # Layer normalization + x = self.norm_1(inputs, **kwargs) + # Cyclic shift if self.shift_size > 0: - shifted_x = tf.roll( - x, shift=[-self.shift_size, -self.shift_size], axis=[1, 2] - ) + x = tf.roll(x, shift=[-self.shift_size, -self.shift_size], axis=[1, 2]) # Window partitioning - x_windows = self.window_partition(shifted_x, self.window_size) - x_windows = tf.reshape( - x_windows, [-1, self.window_size * self.window_size, channels] - ) + x = self.window_partition(x, self.window_size) # (Shifted) Window Multi-head Self Attention - attn_windows = self.attention(x_windows, mask=self.attn_mask, **kwargs) + x = self.attention(x, mask=self.attn_mask, **kwargs) - # Window merging - attn_windows = tf.reshape( - attn_windows, [-1, self.window_size, self.window_size, channels] - ) - shifted_x = self.window_reverse( - attn_windows, - tf.constant(self.resolution), - ) + # Undo window partitioning (window merging) + x = self.window_reverse(x, tf.constant(self.resolution)) - # Reverse cyclic shift + # Undo cyclic shift (reverse cyclic shift) if self.shift_size > 0: - x = tf.roll( - shifted_x, shift=[self.shift_size, self.shift_size], axis=[1, 2] - ) - else: - x = shifted_x - - x = tf.reshape(x, [batch, self.resolution * self.resolution, channels]) + x = tf.roll(x, shift=[self.shift_size, self.shift_size], axis=[1, 2]) # Sum the skip connection and the output of (S)W-MSA x = shortcut_1 + self.drop_path(x, **kwargs) @@ -921,7 +880,6 @@ def get_config(self) -> dict: config = super().get_config() config.update( { - "resolution": self.resolution, "num_heads": self.num_heads, "window_size": self.window_size, "shift_size": self.shift_size, @@ -933,4 +891,4 @@ def get_config(self) -> dict: return config def __repr__(self) -> str: - return f"{self.__class__.__name__}(resolution={self.resolution}, window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}, drop_p={self.drop_p}, drop_path_p={self.drop_path_p})" + return f"{self.__class__.__name__}(window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}, drop_p={self.drop_p}, drop_path_p={self.drop_path_p})" diff --git a/tests/test_model.py b/tests/test_model.py index f3fa4bf..13c5a21 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -1,9 +1,11 @@ import pathlib import random import tempfile +import unittest + import tensorflow as tf + import swin.model as sm -import unittest class TestSwin(unittest.TestCase): @@ -34,6 +36,7 @@ def setUp(self) -> None: self.embedding_to_head_ratio = 32 self.embed_dim = self.num_heads[0] * self.embedding_to_head_ratio self.drop_rate = random.random() + self.drop_path_rate = random.random() self.input = tf.random.uniform( [self.batch_size, self.img_size, self.img_size, self.img_channels], @@ -41,13 +44,13 @@ def setUp(self) -> None: ) self.model = sm.Swin( - self.input, - self.num_classes, - self.patch_size, - self.embed_dim, - self.depths, - self.num_heads, - self.drop_rate, + num_classes=self.num_classes, + patch_size=self.patch_size, + embed_dim=self.embed_dim, + depths=self.depths, + num_heads=self.num_heads, + drop_rate=self.drop_rate, + drop_path_rate=self.drop_path_rate, ) def _build_dataset(self) -> tf.data.Dataset: @@ -88,6 +91,44 @@ 
def test_model_output(self) -> None: self.assertEqual(output.shape[0], self.batch_size) self.assertEqual(output.shape[1], self.num_classes) + def test_model_variants_output(self) -> None: + variants = [sm.SwinT, sm.SwinS, sm.SwinB, sm.SwinL] + image_size = 224 + + for variant in variants: + with self.subTest(f"Variant {variant}"): + model = variant(num_classes=self.num_classes, drop_rate=self.drop_rate) + inputs = tf.random.uniform([self.batch_size, image_size, image_size, 3]) + output = model(inputs) + + self.assertEqual(output.shape[0], self.batch_size) + self.assertEqual(output.shape[1], self.num_classes) + + def test_model_custom_window_size_output(self) -> None: + depths = [2, 4, 2] + num_heads = [4, 8, 16] + patch_size = 6 + window_size = 8 + embed_dim = num_heads[0] * 32 + img_size = 384 + num_classes = random.randint(1, 10) + batch_size = 2 ** random.randint(1, 3) + + inputs = tf.random.uniform([batch_size, img_size, img_size, 3]) + model = sm.Swin( + num_classes=num_classes, + patch_size=patch_size, + window_size=window_size, + embed_dim=embed_dim, + depths=depths, + num_heads=num_heads, + ) + + output = model(inputs) + + self.assertEqual(output.shape[0], batch_size) + self.assertEqual(output.shape[1], num_classes) + def test_model_compile(self) -> None: self.model(self.input) @@ -121,11 +162,12 @@ def test_model_restore(self) -> None: output_2 = self.model(self.input) - diff = tf.abs(output_1 - output_2) - diff = diff * 0.01 # We tolerate a 1% difference - diff = tf.floor(diff) - diff = tf.cast(diff, tf.bool) - self.assertEqual(tf.reduce_any(diff), False) + self.assertEqual( + tf.reduce_all( + tf.raw_ops.ApproximateEqual(x=output_1, y=output_2, tolerance=1e-2) + ), + True, + ) # We tolerate a 1% difference def test_model_restore_config(self) -> None: output_1 = self.model(self.input) diff --git a/tests/test_modules.py b/tests/test_modules.py index 74544d0..9740e92 100644 --- a/tests/test_modules.py +++ b/tests/test_modules.py @@ -1,15 +1,17 @@ """Module containing tests for the modules of the Swin Transformer network.""" -import tensorflow as tf import random import unittest + +import tensorflow as tf + import swin.modules as sm class TestSwinLinear(unittest.TestCase): def setUp(self) -> None: self.batch_size = random.randint(1, 5) - self.img_size = 2 ** random.randint(5, 10) # 32-1024 px + self.img_size = 2 ** random.randint(3, 8) # 8-256 self.input = tf.random.uniform( [self.batch_size, self.img_size, self.img_size, 3], dtype=tf.float32 @@ -56,22 +58,13 @@ def setUp(self) -> None: self.wrong_input_shape = tf.random.uniform( [self.batch_size, self.img_size, 2 * self.img_size, 3], dtype=tf.float32 ) - self.wrong_input_dtype = tf.image.convert_image_dtype( - tf.random.uniform( - [self.batch_size, self.img_size, self.img_size, 3], - maxval=255, - dtype=tf.float32, - ), - tf.uint8, - ) - self.wrong_input_channels = self.wrong_input_shape = tf.random.uniform( + self.wrong_input_channels = tf.random.uniform( [self.batch_size, self.img_size, self.img_size, 1], dtype=tf.float32 ) self.wrong_inputs = [ self.wrong_input_shape, self.wrong_input_channels, - self.wrong_input_dtype, ] self.embed_dim = 2 ** random.randint(5, 8) # 32-256 @@ -87,12 +80,12 @@ def test_output(self) -> None: self.assertEqual(shape[0], self.batch_size) self.assertEqual(shape[-1], self.embed_dim) - self.assertEqual(output.dtype, tf.float32) + self.assertEqual(output.dtype, self.input.dtype) def test_wrong_input(self) -> None: for input_data in self.wrong_inputs: - with self.subTest(input_data): - 
self.assertRaises(Exception, self.layer, input_data) + with self.subTest(f"{input_data.shape}, {input_data.dtype}"): + self.assertRaises(AssertionError, self.layer, input_data) def test_trainable_variables(self) -> None: # Build the layer @@ -117,14 +110,15 @@ def test_gradient(self) -> None: class TestSwinPatchMerging(unittest.TestCase): def setUp(self) -> None: self.batch_size = random.randint(1, 5) - self.patch_size = 2 * random.randint(7, 28) # Any even number would be fine + self.patches = 2 * random.randint(7, 28) # Any even number would be fine self.embed_dim = 2 ** random.randint(5, 8) # 32-256 self.input = tf.random.uniform( - [self.batch_size, self.patch_size**2, self.embed_dim], dtype=tf.float32 + [self.batch_size, self.patches, self.patches, self.embed_dim], + dtype=tf.float32, ) - self.layer = sm.SwinPatchMerging(self.patch_size) + self.layer = sm.SwinPatchMerging() def test_build_odd_patch_size(self) -> None: self.assertRaises(Exception, sm.SwinPatchMerging, 7) # Any odd number is fine @@ -134,26 +128,21 @@ def test_output(self) -> None: shape = output.shape - self.assertEqual(len(shape), 3) + self.assertEqual(len(shape), 4) self.assertEqual(shape[0], self.batch_size) - self.assertEqual(shape[1], self.patch_size**2 / 4) - self.assertEqual(shape[2], self.embed_dim * 2) - self.assertEqual(output.dtype, tf.float32) + self.assertEqual(shape[1], shape[2]) + self.assertEqual(shape[1], self.patches / 2) + self.assertEqual(shape[3], self.embed_dim * 2) + self.assertEqual(output.dtype, self.input.dtype) def test_wrong_input(self) -> None: self.wrong_input_shape = tf.random.uniform( - [self.batch_size, self.patch_size * (self.patch_size * 2), self.embed_dim], + [self.batch_size, self.patches**2, self.embed_dim], dtype=tf.float32, ) - self.wrong_input_dtype = tf.random.uniform( - [self.batch_size, self.patch_size**2, self.embed_dim], - maxval=255, - dtype=tf.int32, - ) self.wrong_inputs = [ self.wrong_input_shape, - self.wrong_input_dtype, ] for input_data in self.wrong_inputs: @@ -186,6 +175,10 @@ def setUp(self) -> None: [4, 224, 224, 3], dtype=tf.float32 ) # Any shape and dtype would be ok + def test_wrong_probability(self) -> None: + self.assertRaises(AssertionError, sm.SwinDropPath, -1.2) + self.assertRaises(AssertionError, sm.SwinDropPath, 2.0) + def test_output_dtype(self) -> None: layer = sm.SwinDropPath(0.5) @@ -235,18 +228,27 @@ def setUp(self) -> None: self.layer = sm.SwinMlp(self.hidden_features, self.out_features, self.drop_p) self.batch_size = random.randint(1, 5) - self.input = tf.random.uniform([self.batch_size, 768, 96], dtype=tf.float32) + self.resolution = random.randint(1, 10) + self.embed_dim = random.randint(1, 100) + self.input = tf.random.uniform( + [self.batch_size, self.resolution, self.resolution, self.embed_dim], + dtype=tf.float32, + ) def test_output(self) -> None: output = self.layer(self.input) shape = output.shape - self.assertEqual(shape[0], self.batch_size) + self.assertEqual( + shape[:-1], [self.batch_size, self.resolution, self.resolution] + ) self.assertEqual(shape[-1], self.out_features) - self.assertEqual(output.dtype, tf.float32) + self.assertEqual(output.dtype, self.input.dtype) def test_wrong_input(self) -> None: - wrong_input = tf.random.uniform([self.batch_size, 224, 224, 96]) + wrong_input = tf.random.uniform( + [self.batch_size, self.resolution**2, self.embed_dim] + ) self.assertRaises(Exception, self.layer, wrong_input) @@ -274,13 +276,11 @@ class TestWindowAttention(unittest.TestCase): def setUp(self) -> None: self.window_size = 2 * 
random.randint( 1, 5 - ) # Could also be odd, but it simplifies the following operations + ) # Could also be odd, but it simplifies some future operations self.num_heads = random.randint(1, 5) self.proj_drop_r = random.random() - self.layer = sm.SwinWindowAttention( - self.window_size, self.num_heads, self.proj_drop_r - ) + self.layer = sm.SwinWindowAttention(self.num_heads, self.proj_drop_r) self.batch_size = random.randint(1, 5) self.embed_dim = self.num_heads * random.randint( @@ -289,12 +289,14 @@ def setUp(self) -> None: self.num_patches = self.window_size * random.randint( 2, 10 ) # Must be divisible by window_size - self.num_windows = int((self.num_patches / self.window_size)) ** 2 + self.num_windows = (self.num_patches // self.window_size) ** 2 self.input = tf.random.uniform( [ - self.batch_size * self.num_windows, - self.window_size**2, + self.batch_size, + self.num_windows, + self.window_size, + self.window_size, self.embed_dim, ], dtype=tf.float32, @@ -304,6 +306,15 @@ def setUp(self) -> None: self.num_patches, self.window_size, self.window_size // 2 ) + def test_build_relative_position_index_output(self) -> None: + output = self.layer.build_relative_position_index(self.window_size) + + self.assertEqual(output.dtype, tf.int32) + self.assertEqual(output.shape, [self.window_size**2, self.window_size**2]) + self.assertEqual( + output[0, self.window_size**2 - 1], 0 + ) # Top-right corner must be 0 + def test_output_no_shift(self) -> None: output = self.layer(self.input) @@ -322,25 +333,30 @@ def test_wrong_input(self) -> None: tf.random.uniform( [ self.batch_size * self.num_windows, - self.window_size**2 - 1, + self.window_size**2, self.embed_dim, ], dtype=tf.float32, ) ) # Wrong shape - wrong_inputs.append( - tf.random.uniform( - [ - self.batch_size * self.num_windows, - self.window_size**2, - self.num_heads * 2 + 1, - ], - dtype=tf.float32, + + if self.num_heads > 1: + # Incompatible embedding dimensions. Only add this test when + # num_heads is greater than 1 or it will fail as n % 1 = 0 for any + # n. + wrong_inputs.append( + tf.random.uniform( + [ + self.batch_size * self.num_windows, + self.window_size**2, + self.num_heads - 1, + ], + dtype=tf.float32, + ) ) - ) # Incompatible emebedding dimensions for input_data in wrong_inputs: - with self.subTest(input_data): + with self.subTest(f"{input_data.shape}, {input_data.dtype}"): self.assertRaises(Exception, self.layer, input_data) def test_trainable_variables(self) -> None: @@ -365,19 +381,20 @@ def test_gradient(self) -> None: class TestSwinTransformer(unittest.TestCase): def setUp(self) -> None: - self.resolution = 2 ** random.randint(3, 6) # 8-64 + self.resolution = 2 ** random.randint(3, 6) # 8-64 patches self.num_heads = random.randint(1, 4) # Window size must be evenly divisible by resolution and > 0.
We choose # > 1 to simplify tests where having window_size = resolution would # require a lot more code - self.window_size = int(self.resolution / (2 ** random.randint(1, 2))) - self.shift_size = int(self.window_size / 2) + self.window_size = self.resolution // ( + 2 ** random.randint(1, 2) + ) # 4-16 patches + self.shift_size = self.window_size // 2 self.mlp_ratio = 4.0 self.drop_p = random.random() self.drop_path_p = random.random() self.layer = sm.SwinTransformer( - self.resolution, self.num_heads, self.window_size, self.shift_size, @@ -386,14 +403,14 @@ def setUp(self) -> None: self.drop_path_p, ) - self.batch_size = 2 ** random.randint(0, 3) - self.patch_size = self.resolution**2 + self.batch_size = 2 ** random.randint(0, 3) # 1-8 self.embed_dim = self.num_heads * random.randint( 10, 20 ) # Any multiple of num_heads would be fine self.input = tf.random.uniform( - [self.batch_size, self.patch_size, self.embed_dim], dtype=tf.float32 + [self.batch_size, self.resolution, self.resolution, self.embed_dim], + dtype=tf.float32, ) def test_window_partition_wrong_inputs(self) -> None: @@ -404,10 +421,10 @@ def test_window_partition_wrong_inputs(self) -> None: [self.batch_size, self.resolution, self.resolution + 1, self.embed_dim], dtype=tf.float32, ) - ) # Not squared patches + ) # Non-square patches wrong_inputs.append( tf.random.uniform( - [self.batch_size, self.patch_size, self.embed_dim], + [self.batch_size, self.resolution**2, self.embed_dim], dtype=tf.float32, ) ) # Wrong rank @@ -429,10 +446,39 @@ def test_window_partition_output(self) -> None: output = sm.SwinTransformer.window_partition(input_data, self.window_size) self.assertEqual(output.dtype, input_data.dtype) + self.assertEqual(tf.rank(output), 5) self.assertEqual(output.shape[0] % self.batch_size, 0) - self.assertEqual(output.shape[1], self.window_size) - self.assertEqual(output.shape[1], output.shape[2]) - self.assertEqual(output.shape[3], self.embed_dim) + self.assertEqual(output.shape[2], output.shape[3]) + self.assertEqual(output.shape[2], self.window_size) + self.assertEqual(output.shape[4], self.embed_dim) + + def test_window_partition_order(self) -> None: + batch_size = 1 + resolution = 4 + embed_dim = 1 + window_size = 2 + + window_res = resolution // window_size + + input_data = tf.reshape( + tf.range(batch_size * resolution**2 * embed_dim), + [batch_size, resolution, resolution, embed_dim], + ) + output = sm.SwinTransformer.window_partition(input_data, window_size) + + for batch in range(batch_size): + for i in range(window_res): + for j in range(window_res): + win_idx = i * window_res + j + with self.subTest(f"window {win_idx}"): + out_win = output[batch, win_idx] + true_win = input_data[ + batch, + i * window_size : (i + 1) * window_size, + j * window_size : (j + 1) * window_size, + ] + + self.assertTrue(tf.reduce_all(tf.equal(out_win, true_win))) def test_window_reverse_wrong_inputs(self) -> None: wrong_inputs = [] @@ -440,26 +486,32 @@ def test_window_reverse_wrong_inputs(self) -> None: wrong_inputs.append( tf.random.uniform( [ - self.batch_size * int((self.resolution / self.window_size)) ** 2, + self.batch_size, + (self.resolution // self.window_size) ** 2, self.window_size, self.window_size + 1, self.embed_dim, ], dtype=tf.float32, ) - ) # Not squared windows + ) # Non-square windows wrong_inputs.append( tf.random.uniform( - [self.batch_size, self.window_size**2, self.embed_dim], + [ + self.batch_size, + (self.resolution // self.window_size) ** 2, + self.window_size**2, + self.embed_dim, + ], dtype=tf.float32, ) ) 
# Wrong rank for input_data in wrong_inputs: - with self.subTest(): + with self.subTest(f"{input_data.shape}, {input_data.dtype}"): self.assertRaises( Exception, - sm.SwinTransformer.window_partition, + sm.SwinTransformer.window_reverse, input_data, self.window_size, ) @@ -467,7 +519,8 @@ def test_window_reverse_wrong_inputs(self) -> None: def test_window_reverse_output(self) -> None: input_data = tf.random.uniform( [ - self.batch_size * int((self.resolution / self.window_size)) ** 2, + self.batch_size, + (self.resolution // self.window_size) ** 2, self.window_size, self.window_size, self.embed_dim, @@ -482,33 +535,6 @@ def test_window_reverse_output(self) -> None: output.shape, ) - def test_masked_fill_mask_wrong_shape(self) -> None: - x = random.randint(1, 100) - y = random.randint(1, 100) - z = random.randint(1, 10) - value = 5 - - input = tf.random.uniform([x, y], dtype=tf.float32) - mask = tf.ones([x, y, z], dtype=tf.bool) - - self.assertRaises( - Exception, sm.SwinTransformer.masked_fill, input, mask, tf.constant(value) - ) - - def test_masked_fill_output(self) -> None: - x = random.randint(1, 100) - y = random.randint(1, 100) - z = random.randint(1, 10) - value = 5 - - input = tf.random.uniform([x, y, z], dtype=tf.float32) - mask = tf.ones(input.shape, dtype=tf.float32) - output = sm.SwinTransformer.masked_fill(input, mask, tf.constant(value)) - - self.assertEqual(input.shape, output.shape) - self.assertEqual(input.dtype, output.dtype) - self.assertEqual(tf.reduce_all(output == value), True) - def test_build_attn_mask_output(self) -> None: output = sm.SwinTransformer.build_attn_mask( self.resolution, self.window_size, self.shift_size @@ -518,6 +544,9 @@ def test_build_attn_mask_output(self) -> None: self.assertEqual((self.resolution / self.window_size) ** 2, output.shape[0]) self.assertEqual(self.window_size**2, output.shape[1]) self.assertEqual(output.shape[1], output.shape[2]) + self.assertTrue( + tf.reduce_all(tf.logical_or(output == -100.0, output == 0)) + ) # No value other than 0 or -100 should be present def test_shift_size_bigger_than_window_size(self) -> None: self.assertRaises( @@ -563,20 +592,20 @@ def test_wrong_input(self) -> None: wrong_inputs.append( tf.random.uniform( - [self.batch_size, self.resolution**2 - 1, self.embed_dim], + [self.batch_size, self.resolution, self.resolution - 1, self.embed_dim], dtype=tf.float32, ) - ) # Wrong num_patches + ) # Non-square patches wrong_inputs.append( tf.random.uniform( - [self.batch_size, self.patch_size, self.embed_dim], + [self.batch_size, self.resolution, self.resolution, self.embed_dim], maxval=255, dtype=tf.int32, ) ) # Wrong dtype wrong_inputs.append( tf.random.uniform( - [self.batch_size, self.resolution, self.resolution, self.embed_dim], + [self.batch_size, self.resolution**2, self.embed_dim], dtype=tf.float32, ) ) # Wrong rank @@ -607,29 +636,27 @@ def test_gradient(self) -> None: class TestSwinStage(unittest.TestCase): def setUp(self) -> None: - self.resolution = 2 ** random.randint(3, 6) # 8-64 px + self.resolution = 2 ** random.randint(3, 6) # 8-64 patches self.depth = random.randint(1, 4) self.num_heads = random.randint(1, 4) - self.window_size = int( - self.resolution / (2 ** random.randint(1, 3)) + self.window_size = self.resolution // ( + 2 ** random.randint(1, 3) ) # Must be evenly divisible by resolution and > 0 self.mlp_ratio = 4.0 self.drop_p = random.random() self.drop_path_p = random.random() self.batch_size = 2 ** random.randint(0, 4) - self.num_patches = self.resolution**2 self.embed_dim = self.num_heads 
* random.randint( 10, 100 ) # Must be evenly divisible by num_heads self.input = tf.random.uniform( - [self.batch_size, self.num_patches, self.embed_dim], dtype=tf.float32 + [self.batch_size, self.resolution, self.resolution, self.embed_dim], + dtype=tf.float32, ) - def test_wrong_inputs(self) -> None: - layer = sm.SwinStage( - self.resolution, + self.layer_ds = sm.SwinStage( self.depth, self.num_heads, self.window_size, @@ -638,17 +665,7 @@ def test_wrong_inputs(self) -> None: self.drop_path_p, downsample=True, ) - - wrong_input = tf.random.uniform( - [self.batch_size, self.resolution, self.resolution, self.embed_dim], - dtype=tf.float32, - ) - - self.assertRaises(Exception, layer, wrong_input) - - def test_output_no_downsample(self) -> None: - layer = sm.SwinStage( - self.resolution, + self.layer_no_ds = sm.SwinStage( self.depth, self.num_heads, self.window_size, @@ -658,66 +675,58 @@ def test_output_no_downsample(self) -> None: downsample=False, ) - output = layer(self.input) + def test_wrong_inputs(self) -> None: + wrong_inputs = list() + + wrong_inputs.append( + tf.random.uniform( + [self.batch_size, self.resolution**2, self.embed_dim], + dtype=tf.float32, + ) + ) # Wrong rank + wrong_inputs.append( + tf.random.uniform( + [self.batch_size, self.resolution, self.resolution + 1, self.embed_dim], + dtype=tf.float32, + ) + ) # Non-square input + + for input_data in wrong_inputs: + with self.subTest(f"{input_data.shape}, {input_data.dtype}"): + self.assertRaises(AssertionError, self.layer_ds, input_data) + + def test_output_no_downsample(self) -> None: + output = self.layer_no_ds(self.input) self.assertEqual(output.dtype, self.input.dtype) self.assertEqual(output.shape, self.input.shape) def test_output_downsample(self) -> None: - layer = sm.SwinStage( - self.resolution, - self.depth, - self.num_heads, - self.window_size, - self.mlp_ratio, - self.drop_p, - self.drop_path_p, - downsample=True, - ) - - output = layer(self.input) + output = self.layer_ds(self.input) self.assertEqual(output.dtype, self.input.dtype) + self.assertEqual(len(output.shape), 4) self.assertEqual(output.shape[0], self.input.shape[0]) - self.assertEqual(output.shape[1], self.input.shape[1] / 4) - self.assertEqual(output.shape[2], self.input.shape[2] * 2) + self.assertEqual(output.shape[1], output.shape[2]) + self.assertEqual(output.shape[1], self.input.shape[1] / 2) + self.assertEqual(output.shape[3], self.input.shape[3] * 2) def test_trainable_variables(self) -> None: - layer = sm.SwinStage( - self.resolution, - self.depth, - self.num_heads, - self.window_size, - self.mlp_ratio, - self.drop_p, - self.drop_path_p, - downsample=True, - ) # Build the layer - layer(self.input) + self.layer_ds(self.input) - t_vars = layer.trainable_variables + t_vars = self.layer_ds.trainable_variables self.assertEqual(len(t_vars), 13 * self.depth + 3) def test_gradient(self) -> None: - layer = sm.SwinStage( - self.resolution, - self.depth, - self.num_heads, - self.window_size, - self.mlp_ratio, - self.drop_p, - self.drop_path_p, - downsample=True, - ) # Build the layer - layer(self.input) + self.layer_ds(self.input) with tf.GradientTape() as gt: - output = layer(self.input) + output = self.layer_ds(self.input) - gradients = gt.gradient(output, layer.trainable_variables) + gradients = gt.gradient(output, self.layer_ds.trainable_variables) self.assertNotIn(None, gradients)
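For reference, the window partition/reverse round trip that ``test_window_partition_order`` and ``test_window_reverse_output`` exercise above can be reproduced in isolation. This is a minimal sketch under the rank-4 input / rank-5 window shapes the patch adopts, not code from the patch itself; the free-standing helper names simply mirror the layer's classmethods:

```python
import tensorflow as tf


def window_partition(patches: tf.Tensor, window_size: int) -> tf.Tensor:
    # [batch, res, res, dim] -> [batch, num_windows, ws, ws, dim]
    b, h, w, c = patches.shape
    x = tf.reshape(
        patches, [b, h // window_size, window_size, w // window_size, window_size, c]
    )
    x = tf.transpose(x, [0, 1, 3, 2, 4, 5])
    return tf.reshape(x, [b, -1, window_size, window_size, c])


def window_reverse(windows: tf.Tensor, resolution: int) -> tf.Tensor:
    # [batch, num_windows, ws, ws, dim] -> [batch, res, res, dim]
    b, _, ws, _, c = windows.shape
    x = tf.reshape(windows, [b, resolution // ws, resolution // ws, ws, ws, c])
    x = tf.transpose(x, [0, 1, 3, 2, 4, 5])
    return tf.reshape(x, [b, resolution, resolution, c])


x = tf.random.uniform([2, 8, 8, 16])
windows = window_partition(x, 4)  # shape [2, 4, 4, 4, 16]
# Reshapes and transposes only reorder values, so the round trip is lossless.
assert tf.reduce_all(window_reverse(windows, 8) == x)
```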