UP031
Borda committed Jan 9, 2025
1 parent 53d0f60 commit fc3b3ae
Showing 7 changed files with 16 additions and 22 deletions.
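
Ruff's UP031 rule flags printf-style % string formatting; every hunk below replaces it with an equivalent f-string, except for one occurrence that is kept and silenced with # noqa: UP031. A minimal sketch of the pattern (the value below is illustrative, not taken from the diff):

# Hypothetical example of the UP031 fix applied throughout this commit
latent_dim = 128  # illustrative value, not from the repository
old_style = "cifar10_%i" % latent_dim  # printf-style formatting, flagged by Ruff as UP031
new_style = f"cifar10_{latent_dim}"    # equivalent f-string
assert old_style == new_style == "cifar10_128"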
@@ -225,7 +225,7 @@ def plot_dists(val_dict, color="C0", xlabel=None, stat="count", use_kde=True):
kde=use_kde and ((val_dict[key].max() - val_dict[key].min()) > 1e-8),
) # Only plot kde if there is variance
hidden_dim_str = (
r"(%i $\to$ %i)" % (val_dict[key].shape[1], val_dict[key].shape[0]) if len(val_dict[key].shape) > 1 else ""
r"(%i $\to$ %i)" % (val_dict[key].shape[1], val_dict[key].shape[0]) if len(val_dict[key].shape) > 1 else "" # noqa: UP031
)
key_ax.set_title(f"{key} {hidden_dim_str}")
if xlabel is not None:
8 changes: 4 additions & 4 deletions course_UvA-DL/05-transformers-and-MH-attention/MHAttention.py
@@ -633,8 +633,8 @@ def forward(self, x):
fig, ax = plt.subplots(2, 2, figsize=(12, 4))
ax = [a for a_list in ax for a in a_list]
for i in range(len(ax)):
- ax[i].plot(np.arange(1, 17), pe[i, :16], color="C%i" % i, marker="o", markersize=6, markeredgecolor="black")
- ax[i].set_title("Encoding in hidden dimension %i" % (i + 1))
+ ax[i].plot(np.arange(1, 17), pe[i, :16], color=f"C{i}", marker="o", markersize=6, markeredgecolor="black")
+ ax[i].set_title(f"Encoding in hidden dimension {i + 1}")
ax[i].set_xlabel("Position in sequence", fontsize=10)
ax[i].set_ylabel("Positional encoding", fontsize=10)
ax[i].set_xticks(np.arange(1, 17))
@@ -1088,7 +1088,7 @@ def plot_attention_maps(input_data, attn_maps, idx=0):
ax[row][column].set_xticklabels(input_data.tolist())
ax[row][column].set_yticks(list(range(seq_len)))
ax[row][column].set_yticklabels(input_data.tolist())
- ax[row][column].set_title("Layer %i, Head %i" % (row + 1, column + 1))
+ ax[row][column].set_title(f"Layer {row + 1}, Head {column + 1}")
fig.subplots_adjust(hspace=0.5)
plt.show()

@@ -1590,7 +1590,7 @@ def visualize_prediction(idx):
visualize_prediction(mistakes[-1])
print("Probabilities:")
for i, p in enumerate(preds[mistakes[-1]].cpu().numpy()):
print("Image %i: %4.2f%%" % (i, 100.0 * p))
print(f"Image {i}: {100.0 * p:4.2f}%")

# %% [markdown]
# In this example, the model confuses a palm tree with a building, giving a probability of ~90% to image 2, and 8% to the actual anomaly.
@@ -570,7 +570,7 @@ def on_epoch_end(self, trainer, pl_module):
grid = torchvision.utils.make_grid(
imgs_to_plot, nrow=imgs_to_plot.shape[0], normalize=True, value_range=(-1, 1)
)
- trainer.logger.experiment.add_image("generation_%i" % i, grid, global_step=trainer.current_epoch)
+ trainer.logger.experiment.add_image(f"generation_{i}", grid, global_step=trainer.current_epoch)

def generate_imgs(self, pl_module):
pl_module.eval()
6 changes: 3 additions & 3 deletions course_UvA-DL/08-deep-autoencoders/notebook.py
@@ -388,7 +388,7 @@ def on_train_epoch_end(self, trainer, pl_module):
def train_cifar(latent_dim):
# Create a PyTorch Lightning trainer with the generation callback
trainer = pl.Trainer(
- default_root_dir=os.path.join(CHECKPOINT_PATH, "cifar10_%i" % latent_dim),
+ default_root_dir=os.path.join(CHECKPOINT_PATH, f"cifar10_{latent_dim}"),
accelerator="auto",
devices=1,
max_epochs=500,
@@ -402,7 +402,7 @@ def train_cifar(latent_dim):
trainer.logger._default_hp_metric = None # Optional logging argument that we don't need

# Check whether pretrained model exists. If yes, load it and skip training
- pretrained_filename = os.path.join(CHECKPOINT_PATH, "cifar10_%i.ckpt" % latent_dim)
+ pretrained_filename = os.path.join(CHECKPOINT_PATH, f"cifar10_{latent_dim}.ckpt")
if os.path.isfile(pretrained_filename):
print("Found pretrained model, loading...")
model = Autoencoder.load_from_checkpoint(pretrained_filename)
@@ -475,7 +475,7 @@ def visualize_reconstructions(model, input_imgs):
grid = torchvision.utils.make_grid(imgs, nrow=4, normalize=True, value_range=(-1, 1))
grid = grid.permute(1, 2, 0)
plt.figure(figsize=(7, 4.5))
plt.title("Reconstructed from %i latents" % (model.hparams.latent_dim))
plt.title(f"Reconstructed from {model.hparams.latent_dim} latents")
plt.imshow(grid)
plt.axis("off")
plt.show()
4 changes: 2 additions & 2 deletions course_UvA-DL/09-normalizing-flows/NF.py
@@ -512,7 +512,7 @@ def visualize_dequantization(quants, prior=None):
x_ticks = []
for v in np.unique(out):
indices = np.where(out == v)
- color = to_rgb("C%i" % v)
+ color = to_rgb(f"C{v}")
plt.fill_between(inp[indices], prob[indices], np.zeros(indices[0].shape[0]), color=color + (0.5,), label=str(v))
plt.plot([inp[indices[0][0]]] * 2, [0, prob[indices[0][0]]], color=color)
plt.plot([inp[indices[0][-1]]] * 2, [0, prob[indices[0][-1]]], color=color)
@@ -525,7 +525,7 @@ def visualize_dequantization(quants, prior=None):
plt.xlim(inp.min(), inp.max())
plt.xlabel("z")
plt.ylabel("Probability")
plt.title("Dequantization distribution for %i discrete values" % quants)
plt.title(f"Dequantization distribution for {quants} discrete values")
plt.legend()
plt.show()
plt.close()
2 changes: 1 addition & 1 deletion course_UvA-DL/10-autoregressive-image-modeling/notebook.py
@@ -403,7 +403,7 @@ def show_center_recep_field(img, out):
for l_idx in range(4):
vert_img = vert_conv(vert_img)
horiz_img = horiz_conv(horiz_img) + vert_img
print("Layer %i" % (l_idx + 2))
print(f"Layer {l_idx + 2}")
show_center_recep_field(inp_img, horiz_img)

# %% [markdown]
14 changes: 4 additions & 10 deletions course_UvA-DL/12-meta-learning/notebook.py
@@ -703,10 +703,7 @@ def test_proto_net(model, dataset, data_feats=None, k_shot=4):
data_feats = None
for k in [2, 4, 8, 16, 32]:
protonet_accuracies[k], data_feats = test_proto_net(protonet_model, test_set, data_feats=data_feats, k_shot=k)
- print(
-     "Accuracy for k=%i: %4.2f%% (+-%4.2f%%)"
-     % (k, 100.0 * protonet_accuracies[k][0], 100 * protonet_accuracies[k][1])
- )
+ print(f"Accuracy for k={k}: {100.0 * protonet_accuracies[k][0]:4.2f}% (+-{100 * protonet_accuracies[k][1]:4.2f}%)")

# %% [markdown]
# Before discussing the results above, let's first plot the accuracies over number of examples in the support set:
@@ -1174,8 +1171,7 @@ def test_protomaml(model, dataset, k_shot=4):

for k in protomaml_accuracies:
print(
"Accuracy for k=%i: %4.2f%% (+-%4.2f%%)"
% (k, 100.0 * protomaml_accuracies[k][0], 100.0 * protomaml_accuracies[k][1])
f"Accuracy for k={k}: {100.0 * protomaml_accuracies[k][0]:4.2f}% (+-{100.0 * protomaml_accuracies[k][1]:4.2f}%)"
)

# %% [markdown]
@@ -1267,8 +1263,7 @@ def test_protomaml(model, dataset, k_shot=4):
protonet_model, svhn_fewshot_dataset, data_feats=data_feats, k_shot=k
)
print(
"Accuracy for k=%i: %4.2f%% (+-%4.2f%%)"
% (k, 100.0 * protonet_svhn_accuracies[k][0], 100 * protonet_svhn_accuracies[k][1])
f"Accuracy for k={k}: {100.0 * protonet_svhn_accuracies[k][0]:4.2f}% (+-{100 * protonet_svhn_accuracies[k][1]:4.2f}%)"
)

# %% [markdown]
@@ -1295,8 +1290,7 @@ def test_protomaml(model, dataset, k_shot=4):

for k in protomaml_svhn_accuracies:
print(
"Accuracy for k=%i: %4.2f%% (+-%4.2f%%)"
% (k, 100.0 * protomaml_svhn_accuracies[k][0], 100.0 * protomaml_svhn_accuracies[k][1])
f"Accuracy for k={k}: {100.0 * protomaml_svhn_accuracies[k][0]:4.2f}% (+-{100.0 * protomaml_svhn_accuracies[k][1]:4.2f}%)"
)

# %% [markdown]
