Commit 5871677

Formatted all the examples

GijsVermarien committed Oct 30, 2023
1 parent 7755ece commit 5871677
Showing 23 changed files with 2,590 additions and 830 deletions.
238 changes: 182 additions & 56 deletions examples/ODE_2DOF_sparsity.ipynb

Large diffs are not rendered by default.

142 changes: 91 additions & 51 deletions examples/ODE_Example_coupled_nonlin.ipynb
@@ -59,7 +59,7 @@
"torch.manual_seed(0)\n",
"\n",
"# Configuring GPU or CPU\n",
"if False: #torch.cuda.is_available():\n",
"if False: # torch.cuda.is_available():\n",
" device = \"cuda\"\n",
"else:\n",
" device = \"cpu\"\n",
@@ -91,6 +91,7 @@
],
"source": [
"import deepymod as dpm\n",
"\n",
"dpm"
]
},
@@ -112,7 +113,7 @@
"def dU_dt_true(U):\n",
" \"\"\"\n",
" returns the right hand side of the differential equation\"\"\"\n",
" return [U[1], -1*U[1] - 5*np.sin(U[0])]\n",
" return [U[1], -1 * U[1] - 5 * np.sin(U[0])]\n",
"\n",
"\n",
"def dU_dt_sin(U, t):\n",
@@ -140,8 +141,8 @@
" \"\"\"\n",
" Creates data which is the solution of the simple ODE system example.\n",
" the output has torch.float32 format.\n",
" \n",
" Args: \n",
"\n",
" Args:\n",
" U0: Initial condition\n",
" ts: Time points to evaluate the ODE at.\n",
" \"\"\"\n",
@@ -192,7 +193,7 @@
"source": [
"# see deepymod.data.base.Dataset for definition of `Dataset` class.\n",
"dataset = Dataset(\n",
" create_data, #load_function -> see documentation\n",
" create_data, # load_function -> see documentation\n",
" preprocess_kwargs={\"noise_level\": 0.1},\n",
" device=device,\n",
")\n",
@@ -208,7 +209,7 @@
"ax.set_xlabel(\"t\")\n",
"ax.legend()\n",
"ax.set_title(\"unsampled and unnormalized\")\n",
"plt.show()\n"
"plt.show()"
]
},
{
@@ -226,12 +227,12 @@
"outputs": [],
"source": [
"def custom_normalize(feature):\n",
" \"\"\"minmax all features by their absolute maximum\n",
" Args:\n",
" feature (torch.tensor): data to be minmax normalized\n",
" Returns:\n",
" (torch.tensor): minmaxed data\"\"\"\n",
" return (feature/feature.abs().max(axis=0).values)"
" \"\"\"minmax all features by their absolute maximum\n",
" Args:\n",
" feature (torch.tensor): data to be minmax normalized\n",
" Returns:\n",
" (torch.tensor): minmaxed data\"\"\"\n",
" return feature / feature.abs().max(axis=0).values"
]
},
{
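A standalone check of what this normalization does: each column is divided by its own absolute maximum, so every value lands in [-1, 1] (plain PyTorch, illustrative numbers; dim=0 is the same reduction as axis=0 above):

import torch

x = torch.tensor([[1.0, -4.0], [3.0, 2.0]])
print(x / x.abs().max(dim=0).values)
# tensor([[ 0.3333, -1.0000],
#         [ 1.0000,  0.5000]])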
@@ -268,11 +269,13 @@
"source": [
"dataset = Dataset(\n",
" create_data,\n",
" preprocess_kwargs={\"noise_level\": 0, \n",
" preprocess_kwargs={\n",
" \"noise_level\": 0,\n",
" \"normalize_coords\": True,\n",
" \"normalize_data\": True,},\n",
" \"normalize_data\": True,\n",
" },\n",
" apply_normalize=custom_normalize,\n",
" device=device\n",
" device=device,\n",
")\n",
"fig, ax = plt.subplots()\n",
"ax.scatter(dataset.coords.cpu(), dataset.data[:, 0].cpu(), label=\"u\", s=3)\n",
@@ -338,7 +341,7 @@
" \"normalize_data\": False,\n",
" },\n",
" apply_normalize=custom_normalize,\n",
" device=device\n",
" device=device,\n",
")\n",
"dataset.data.shape\n",
"fig, ax = plt.subplots()\n",
@@ -363,8 +366,8 @@
"metadata": {},
"outputs": [],
"source": [
" # see deepymod.data.base.get_train_test_loader for definition of `get_train_test_loader` function.\n",
" # the shuffle is completely random mixing latter and earlier times\n",
"# see deepymod.data.base.get_train_test_loader for definition of `get_train_test_loader` function.\n",
"# the shuffle is completely random mixing latter and earlier times\n",
"train_dataloader, test_dataloader = get_train_test_loader(dataset, train_test_split=0.8)"
]
},
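Conceptually the helper shuffles the samples and splits them 80/20 into train and test loaders; a rough stand-in in plain PyTorch (a sketch only: the deepymod helper's batching and internals may differ, and it assumes the dataset supports len()):

from torch.utils.data import DataLoader, random_split

n_train = int(0.8 * len(dataset))
train_ds, test_ds = random_split(dataset, [n_train, len(dataset) - n_train])
train_loader = DataLoader(train_ds, batch_size=len(train_ds), shuffle=True)
test_loader = DataLoader(test_ds, batch_size=len(test_ds))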
@@ -383,7 +386,9 @@
],
"source": [
"for batch_idx, (input, target) in enumerate(train_dataloader):\n",
" print(f\"Batch {batch_idx} - Input shape: {input.shape}, Target shape: {target.shape}\")\n"
" print(\n",
" f\"Batch {batch_idx} - Input shape: {input.shape}, Target shape: {target.shape}\"\n",
" )"
]
},
{
@@ -401,7 +406,9 @@
],
"source": [
"for batch_idx, (input, target) in enumerate(test_dataloader):\n",
" print(f\"Batch {batch_idx} - Input shape: {input.shape}, Target shape: {target.shape}\")\n"
" print(\n",
" f\"Batch {batch_idx} - Input shape: {input.shape}, Target shape: {target.shape}\"\n",
" )"
]
},
{
@@ -443,17 +450,18 @@
" def library(\n",
" self, input: Tuple[torch.Tensor, torch.Tensor]\n",
" ) -> Tuple[TensorList, TensorList]:\n",
"\n",
" prediction, data = input\n",
" samples = prediction.shape[0]\n",
" poly_list = []\n",
" deriv_list = []\n",
" time_deriv_list = []\n",
" # Construct the theta matrix\n",
" C = torch.ones_like(prediction[:, 0]).view(samples, -1) # constant of ones\n",
" u = prediction[:, 0].view(samples, -1) # a term proportional to u\n",
" v = prediction[:, 1].view(samples, -1) # a term proportional to v\n",
" theta = torch.cat((C, u, v, torch.cos(u), torch.sin(u)), dim=1) # a term porportional to sin(u)\n",
" C = torch.ones_like(prediction[:, 0]).view(samples, -1) # constant of ones\n",
" u = prediction[:, 0].view(samples, -1) # a term proportional to u\n",
" v = prediction[:, 1].view(samples, -1) # a term proportional to v\n",
" theta = torch.cat(\n",
" (C, u, v, torch.cos(u), torch.sin(u)), dim=1\n",
" ) # a term porportional to sin(u)\n",
" # Construct a list of time_derivatives\n",
" time_deriv_list = []\n",
" for output in torch.arange(prediction.shape[1]):\n",
@@ -548,7 +556,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = DeepMoD(network, library, estimator, constraint)# .to(device)"
"model = DeepMoD(network, library, estimator, constraint) # .to(device)"
]
},
{
@@ -672,7 +680,8 @@
],
"source": [
"from deepymod.analysis import plot_history\n",
"plot_history(foldername)\n"
"\n",
"plot_history(foldername)"
]
},
{
@@ -761,17 +770,21 @@
}
],
"source": [
"library = ['1','u','v','cos(u)','sin(u)']\n",
"ders = ['u_t','v_t']\n",
"for sparse, coeff_vector, der in zip(model.sparsity_masks,model.constraint_coeffs(),ders):\n",
" expression = ''\n",
" coeffs = [\"%.5f\" % number for number in (coeff_vector.detach().cpu().numpy().squeeze())]\n",
" monomials = [str(a) + '*'+ str(b) for a, b in zip(coeffs, library)]\n",
"library = [\"1\", \"u\", \"v\", \"cos(u)\", \"sin(u)\"]\n",
"ders = [\"u_t\", \"v_t\"]\n",
"for sparse, coeff_vector, der in zip(\n",
" model.sparsity_masks, model.constraint_coeffs(), ders\n",
"):\n",
" expression = \"\"\n",
" coeffs = [\n",
" \"%.5f\" % number for number in (coeff_vector.detach().cpu().numpy().squeeze())\n",
" ]\n",
" monomials = [str(a) + \"*\" + str(b) for a, b in zip(coeffs, library)]\n",
" sparse_array = sparse.detach().cpu().numpy()\n",
" print(der,'=', np.extract(sparse_array, monomials))\n",
" #for sparse_element, coeff in zip(sparse.detach().cpu().numpy(),coeff_vector.detach().cpu().numpy()):\n",
" print(der, \"=\", np.extract(sparse_array, monomials))\n",
" # for sparse_element, coeff in zip(sparse.detach().cpu().numpy(),coeff_vector.detach().cpu().numpy()):\n",
" # expression += ' +'+str(coeff)+library[sparse_element]\n",
" #print(der,' =',expression, '\\t')"
" # print(der,' =',expression, '\\t')"
]
},
{
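The loop above relies on np.extract to keep only the monomials whose sparsity mask is active; a minimal illustration with made-up coefficients (the true v_t here is -v - 5*sin(u)):

import numpy as np

monomials = np.array(["0.00100*1", "0.00000*u", "-0.99800*v", "0.00200*cos(u)", "-4.99700*sin(u)"])
mask = np.array([False, False, True, False, True])
print("v_t =", np.extract(mask, monomials))
# v_t = ['-0.99800*v' '-4.99700*sin(u)']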
@@ -801,37 +814,62 @@
"def dU_dt_estimate(U):\n",
" TH = [1, U[0], U[1], np.cos(U[0]), np.sin(U[0])]\n",
" output = []\n",
" for sparse, coeff_vector, der in zip(model.sparsity_masks,model.constraint_coeffs(),ders):\n",
" for sparse, coeff_vector, der in zip(\n",
" model.sparsity_masks, model.constraint_coeffs(), ders\n",
" ):\n",
" coeffs = coeff_vector.detach().cpu().numpy().squeeze()\n",
" output.append(np.sum(sparse.detach().cpu().numpy()*coeffs*TH))\n",
" output.append(np.sum(sparse.detach().cpu().numpy() * coeffs * TH))\n",
" # Here U is a vector such that u=U[0] and v=U[1]. This function should return [u', v']\n",
" return output\n",
"\n",
"\n",
"fig, axs = plt.subplots(1, 2, figsize=(15, 5))\n",
"\n",
"# subfigure 1\n",
"axs[0].scatter(dataset.coords, model(dataset.coords)[0][:,0].detach().cpu().numpy(), label='u_model')\n",
"axs[0].scatter(dataset.coords, model(dataset.coords)[0][:,1].detach().cpu().numpy(), label='v_model')\n",
"axs[0].scatter(\n",
" dataset.coords,\n",
" model(dataset.coords)[0][:, 0].detach().cpu().numpy(),\n",
" label=\"u_model\",\n",
")\n",
"axs[0].scatter(\n",
" dataset.coords,\n",
" model(dataset.coords)[0][:, 1].detach().cpu().numpy(),\n",
" label=\"v_model\",\n",
")\n",
"axs[0].scatter(dataset.coords.cpu(), dataset.data[:, 0].cpu(), label=\"u\", s=3)\n",
"axs[0].scatter(dataset.coords.cpu(), dataset.data[:, 1].cpu(), label=\"v\", s=3)\n",
"axs[0].legend()\n",
"\n",
"# subfigure 2\n",
"deriv_eq_estimate = np.array(list(map(dU_dt_estimate, dataset.data.detach().cpu().numpy())))\n",
"deriv_eq_estimate = np.array(\n",
" list(map(dU_dt_estimate, dataset.data.detach().cpu().numpy()))\n",
")\n",
"deriv_eq_true = np.array(list(map(dU_dt_true, dataset.data.detach().cpu().numpy())))\n",
"\n",
"axs[1].scatter(dataset.coords, deriv_eq_estimate[:,0], label='u_dot via discovered eq')\n",
"axs[1].scatter(dataset.coords, deriv_eq_estimate[:,1], label='v_dot via discovered eq') \n",
"axs[1].scatter(dataset.coords, deriv_eq_estimate[:, 0], label=\"u_dot via discovered eq\")\n",
"axs[1].scatter(dataset.coords, deriv_eq_estimate[:, 1], label=\"v_dot via discovered eq\")\n",
"\n",
"axs[1].scatter(dataset.coords, model(dataset.coords)[1][0].detach().cpu().numpy(), label='u_dot via model autodiff', marker='x', s=25)\n",
"axs[1].scatter(dataset.coords, model(dataset.coords)[1][1].detach().cpu().numpy(), label='v_dot via model autodiff', marker='x', s=25)\n",
"axs[1].scatter(\n",
" dataset.coords,\n",
" model(dataset.coords)[1][0].detach().cpu().numpy(),\n",
" label=\"u_dot via model autodiff\",\n",
" marker=\"x\",\n",
" s=25,\n",
")\n",
"axs[1].scatter(\n",
" dataset.coords,\n",
" model(dataset.coords)[1][1].detach().cpu().numpy(),\n",
" label=\"v_dot via model autodiff\",\n",
" marker=\"x\",\n",
" s=25,\n",
")\n",
"\n",
"axs[1].scatter(dataset.coords, deriv_eq_true[:,0], label='u_dot true', marker='o', s=6)\n",
"axs[1].scatter(dataset.coords, deriv_eq_true[:,1], label='v_dot true', marker='o', s=6)\n",
"axs[1].scatter(dataset.coords, deriv_eq_true[:, 0], label=\"u_dot true\", marker=\"o\", s=6)\n",
"axs[1].scatter(dataset.coords, deriv_eq_true[:, 1], label=\"v_dot true\", marker=\"o\", s=6)\n",
"\n",
"axs[1].legend() \n",
"axs[1].legend()\n",
"\n",
"plt.show()\n"
"plt.show()"
]
},
{
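An arithmetic sanity check on the np.sum(sparse * coeffs * TH) step inside dU_dt_estimate, using illustrative values u = 0.1, v = -0.2 and the true coefficients of v_t = -v - 5*sin(u) from dU_dt_true:

import numpy as np

TH = np.array([1.0, 0.1, -0.2, np.cos(0.1), np.sin(0.1)])  # [1, u, v, cos(u), sin(u)]
mask = np.array([0.0, 0.0, 1.0, 0.0, 1.0])  # mask keeps v and sin(u)
coeffs = np.array([0.0, 0.0, -1.0, 0.0, -5.0])  # true coefficients
print(np.sum(mask * coeffs * TH))  # 0.2 - 5*sin(0.1), roughly -0.299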
@@ -858,7 +896,9 @@
],
"source": [
"for batch_idx, (input, target) in enumerate(train_dataloader):\n",
" print(f\"Batch {batch_idx} - Input shape: {input.shape}, Target shape: {target.shape}\")\n",
" print(\n",
" f\"Batch {batch_idx} - Input shape: {input.shape}, Target shape: {target.shape}\"\n",
" )\n",
" prediction, coordinates = model.func_approx(input)\n",
" time_derivs, thetas = model.library((prediction, coordinates))\n",
" print(f\"{prediction.shape = }, {coordinates.shape = }\")\n",
@@ -882,7 +922,7 @@
}
],
"source": [
"[3]*3"
"[3] * 3"
]
},
{