diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b0cc9f30b..29901406b 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -9,7 +9,7 @@ ci:

 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.6.0
+    rev: v5.0.0
     hooks:
       - id: end-of-file-fixer
       - id: trailing-whitespace
@@ -45,7 +45,7 @@ repos:
         args: ["--print-width=120"]

   - repo: https://github.com/executablebooks/mdformat
-    rev: 0.7.17
+    rev: 0.7.21
     hooks:
       - id: mdformat
         additional_dependencies:
@@ -55,7 +55,7 @@ repos:
         args: ["--number"]

   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.5.0
+    rev: v0.8.6
     hooks:
       # try to fix what is possible
       - id: ruff
diff --git a/README.md b/README.md
index 78b2b8657..c5d863541 100644
--- a/README.md
+++ b/README.md
@@ -44,13 +44,13 @@ The addition has to formed as new folder:
   accelerator:
     - CPU
 ```
-- _\[optional\]_ requirements listed in `requirements.txt` in the particular folder (in case you need some other packaged then listed the parent folder)
+- _[optional]_ requirements listed in `requirements.txt` in the particular folder (in case you need some other packaged then listed the parent folder)

 ## Using datasets

 It is quite common to use some public or competition's dataset for your example.
 We facilitate this via defining the data sources in the metafile.
-There are two basic options, download a file from web or pul Kaggle dataset _\[Experimental\]_:
+There are two basic options, download a file from web or pul Kaggle dataset _[Experimental]_:

 ```yaml
 datasets:
diff --git a/course_UvA-DL/03-initialization-and-optimization/notebook.py b/course_UvA-DL/03-initialization-and-optimization/notebook.py
index 0af979277..9a1f878ea 100644
--- a/course_UvA-DL/03-initialization-and-optimization/notebook.py
+++ b/course_UvA-DL/03-initialization-and-optimization/notebook.py
@@ -225,7 +225,7 @@ def plot_dists(val_dict, color="C0", xlabel=None, stat="count", use_kde=True):
             kde=use_kde and ((val_dict[key].max() - val_dict[key].min()) > 1e-8),
         )  # Only plot kde if there is variance
         hidden_dim_str = (
-            r"(%i $\to$ %i)" % (val_dict[key].shape[1], val_dict[key].shape[0]) if len(val_dict[key].shape) > 1 else ""
+            r"(%i $\to$ %i)" % (val_dict[key].shape[1], val_dict[key].shape[0]) if len(val_dict[key].shape) > 1 else ""  # noqa: UP031
         )
         key_ax.set_title(f"{key} {hidden_dim_str}")
         if xlabel is not None:
diff --git a/course_UvA-DL/05-transformers-and-MH-attention/MHAttention.py b/course_UvA-DL/05-transformers-and-MH-attention/MHAttention.py
index 113d89871..85b07fa8d 100644
--- a/course_UvA-DL/05-transformers-and-MH-attention/MHAttention.py
+++ b/course_UvA-DL/05-transformers-and-MH-attention/MHAttention.py
@@ -633,8 +633,8 @@ def forward(self, x):
 fig, ax = plt.subplots(2, 2, figsize=(12, 4))
 ax = [a for a_list in ax for a in a_list]
 for i in range(len(ax)):
-    ax[i].plot(np.arange(1, 17), pe[i, :16], color="C%i" % i, marker="o", markersize=6, markeredgecolor="black")
-    ax[i].set_title("Encoding in hidden dimension %i" % (i + 1))
+    ax[i].plot(np.arange(1, 17), pe[i, :16], color=f"C{i}", marker="o", markersize=6, markeredgecolor="black")
+    ax[i].set_title(f"Encoding in hidden dimension {i + 1}")
     ax[i].set_xlabel("Position in sequence", fontsize=10)
     ax[i].set_ylabel("Positional encoding", fontsize=10)
     ax[i].set_xticks(np.arange(1, 17))
@@ -1088,7 +1088,7 @@ def plot_attention_maps(input_data, attn_maps, idx=0):
             ax[row][column].set_xticklabels(input_data.tolist())
             ax[row][column].set_yticks(list(range(seq_len)))
             ax[row][column].set_yticklabels(input_data.tolist())
-            ax[row][column].set_title("Layer %i, Head %i" % (row + 1, column + 1))
+            ax[row][column].set_title(f"Layer {row + 1}, Head {column + 1}")
     fig.subplots_adjust(hspace=0.5)
     plt.show()

@@ -1590,7 +1590,7 @@ def visualize_prediction(idx):
 visualize_prediction(mistakes[-1])
 print("Probabilities:")
 for i, p in enumerate(preds[mistakes[-1]].cpu().numpy()):
-    print("Image %i: %4.2f%%" % (i, 100.0 * p))
+    print(f"Image {i}: {100.0 * p:4.2f}%")

 # %% [markdown]
 # In this example, the model confuses a palm tree with a building, giving a probability of ~90% to image 2, and 8% to the actual anomaly.
diff --git a/course_UvA-DL/07-deep-energy-based-generative-models/notebook.py b/course_UvA-DL/07-deep-energy-based-generative-models/notebook.py
index 4ee5c260d..ab25eaca9 100644
--- a/course_UvA-DL/07-deep-energy-based-generative-models/notebook.py
+++ b/course_UvA-DL/07-deep-energy-based-generative-models/notebook.py
@@ -570,7 +570,7 @@ def on_epoch_end(self, trainer, pl_module):
                 grid = torchvision.utils.make_grid(
                     imgs_to_plot, nrow=imgs_to_plot.shape[0], normalize=True, value_range=(-1, 1)
                 )
-                trainer.logger.experiment.add_image("generation_%i" % i, grid, global_step=trainer.current_epoch)
+                trainer.logger.experiment.add_image(f"generation_{i}", grid, global_step=trainer.current_epoch)

     def generate_imgs(self, pl_module):
         pl_module.eval()
diff --git a/course_UvA-DL/08-deep-autoencoders/notebook.py b/course_UvA-DL/08-deep-autoencoders/notebook.py
index 499a9c559..2ac5af959 100644
--- a/course_UvA-DL/08-deep-autoencoders/notebook.py
+++ b/course_UvA-DL/08-deep-autoencoders/notebook.py
@@ -388,7 +388,7 @@ def on_train_epoch_end(self, trainer, pl_module):
 def train_cifar(latent_dim):
     # Create a PyTorch Lightning trainer with the generation callback
     trainer = pl.Trainer(
-        default_root_dir=os.path.join(CHECKPOINT_PATH, "cifar10_%i" % latent_dim),
+        default_root_dir=os.path.join(CHECKPOINT_PATH, f"cifar10_{latent_dim}"),
         accelerator="auto",
         devices=1,
         max_epochs=500,
@@ -402,7 +402,7 @@ def train_cifar(latent_dim):
     trainer.logger._default_hp_metric = None  # Optional logging argument that we don't need

     # Check whether pretrained model exists. If yes, load it and skip training
-    pretrained_filename = os.path.join(CHECKPOINT_PATH, "cifar10_%i.ckpt" % latent_dim)
+    pretrained_filename = os.path.join(CHECKPOINT_PATH, f"cifar10_{latent_dim}.ckpt")
     if os.path.isfile(pretrained_filename):
         print("Found pretrained model, loading...")
         model = Autoencoder.load_from_checkpoint(pretrained_filename)
@@ -475,7 +475,7 @@ def visualize_reconstructions(model, input_imgs):
     grid = torchvision.utils.make_grid(imgs, nrow=4, normalize=True, value_range=(-1, 1))
     grid = grid.permute(1, 2, 0)
     plt.figure(figsize=(7, 4.5))
-    plt.title("Reconstructed from %i latents" % (model.hparams.latent_dim))
+    plt.title(f"Reconstructed from {model.hparams.latent_dim} latents")
     plt.imshow(grid)
     plt.axis("off")
     plt.show()
diff --git a/course_UvA-DL/09-normalizing-flows/NF.py b/course_UvA-DL/09-normalizing-flows/NF.py
index 13dc66bf5..84a3b8776 100644
--- a/course_UvA-DL/09-normalizing-flows/NF.py
+++ b/course_UvA-DL/09-normalizing-flows/NF.py
@@ -512,7 +512,7 @@ def visualize_dequantization(quants, prior=None):
     x_ticks = []
     for v in np.unique(out):
         indices = np.where(out == v)
-        color = to_rgb("C%i" % v)
+        color = to_rgb(f"C{v}")
         plt.fill_between(inp[indices], prob[indices], np.zeros(indices[0].shape[0]), color=color + (0.5,), label=str(v))
         plt.plot([inp[indices[0][0]]] * 2, [0, prob[indices[0][0]]], color=color)
         plt.plot([inp[indices[0][-1]]] * 2, [0, prob[indices[0][-1]]], color=color)
@@ -525,7 +525,7 @@ def visualize_dequantization(quants, prior=None):
     plt.xlim(inp.min(), inp.max())
     plt.xlabel("z")
     plt.ylabel("Probability")
-    plt.title("Dequantization distribution for %i discrete values" % quants)
+    plt.title(f"Dequantization distribution for {quants} discrete values")
     plt.legend()
     plt.show()
     plt.close()
diff --git a/course_UvA-DL/10-autoregressive-image-modeling/notebook.py b/course_UvA-DL/10-autoregressive-image-modeling/notebook.py
index a3539be32..2c9dd562c 100644
--- a/course_UvA-DL/10-autoregressive-image-modeling/notebook.py
+++ b/course_UvA-DL/10-autoregressive-image-modeling/notebook.py
@@ -403,7 +403,7 @@ def show_center_recep_field(img, out):
 for l_idx in range(4):
     vert_img = vert_conv(vert_img)
     horiz_img = horiz_conv(horiz_img) + vert_img
-    print("Layer %i" % (l_idx + 2))
+    print(f"Layer {l_idx + 2}")
     show_center_recep_field(inp_img, horiz_img)

 # %% [markdown]
diff --git a/course_UvA-DL/12-meta-learning/notebook.py b/course_UvA-DL/12-meta-learning/notebook.py
index 04f36d67f..48ddd677c 100644
--- a/course_UvA-DL/12-meta-learning/notebook.py
+++ b/course_UvA-DL/12-meta-learning/notebook.py
@@ -703,10 +703,7 @@ def test_proto_net(model, dataset, data_feats=None, k_shot=4):
 data_feats = None
 for k in [2, 4, 8, 16, 32]:
     protonet_accuracies[k], data_feats = test_proto_net(protonet_model, test_set, data_feats=data_feats, k_shot=k)
-    print(
-        "Accuracy for k=%i: %4.2f%% (+-%4.2f%%)"
-        % (k, 100.0 * protonet_accuracies[k][0], 100 * protonet_accuracies[k][1])
-    )
+    print(f"Accuracy for k={k}: {100.0 * protonet_accuracies[k][0]:4.2f}% (+-{100 * protonet_accuracies[k][1]:4.2f}%)")

 # %% [markdown]
 # Before discussing the results above, let's first plot the accuracies over number of examples in the support set:
@@ -1174,8 +1171,7 @@ def test_protomaml(model, dataset, k_shot=4):

 for k in protomaml_accuracies:
     print(
-        "Accuracy for k=%i: %4.2f%% (+-%4.2f%%)"
-        % (k, 100.0 * protomaml_accuracies[k][0], 100.0 * protomaml_accuracies[k][1])
+        f"Accuracy for k={k}: {100.0 * protomaml_accuracies[k][0]:4.2f}% (+-{100.0 * protomaml_accuracies[k][1]:4.2f}%)"
     )

 # %% [markdown]
@@ -1267,8 +1263,7 @@ def test_protomaml(model, dataset, k_shot=4):
         protonet_model, svhn_fewshot_dataset, data_feats=data_feats, k_shot=k
     )
     print(
-        "Accuracy for k=%i: %4.2f%% (+-%4.2f%%)"
-        % (k, 100.0 * protonet_svhn_accuracies[k][0], 100 * protonet_svhn_accuracies[k][1])
+        f"Accuracy for k={k}: {100.0 * protonet_svhn_accuracies[k][0]:4.2f}% (+-{100 * protonet_svhn_accuracies[k][1]:4.2f}%)"
    )

 # %% [markdown]
@@ -1295,8 +1290,7 @@ def test_protomaml(model, dataset, k_shot=4):

 for k in protomaml_svhn_accuracies:
     print(
-        "Accuracy for k=%i: %4.2f%% (+-%4.2f%%)"
-        % (k, 100.0 * protomaml_svhn_accuracies[k][0], 100.0 * protomaml_svhn_accuracies[k][1])
+        f"Accuracy for k={k}: {100.0 * protomaml_svhn_accuracies[k][0]:4.2f}% (+-{100.0 * protomaml_svhn_accuracies[k][1]:4.2f}%)"
     )

 # %% [markdown]
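A minimal standalone sketch of the printf-style-to-f-string conversion applied in the notebook hunks above; the variable names and values here are invented for illustration and are not taken from the notebooks. It checks that `%i` maps to a plain `{...}` placeholder, `%4.2f` to the `:4.2f` format spec, and `%%` to a literal `%` inside an f-string.

```python
# Illustrative only -- names and values below are made up, not from the patched notebooks.
k, acc, std = 4, 0.87654, 0.012

old = "Accuracy for k=%i: %4.2f%% (+-%4.2f%%)" % (k, 100.0 * acc, 100.0 * std)
new = f"Accuracy for k={k}: {100.0 * acc:4.2f}% (+-{100.0 * std:4.2f}%)"
assert old == new  # '%i' -> '{k}', '%4.2f' -> ':4.2f', '%%' -> literal '%'

i = 3
assert "C%i" % i == f"C{i}"  # Matplotlib color-cycle names like "C3" come out identical
```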