example notebook #2

Closed · wants to merge 6 commits
284 changes: 284 additions & 0 deletions SGD-Example.ipynb
@@ -0,0 +1,284 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "21a12036-3da1-42a8-9952-9693b8291f05",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from sirf import STIR as pet\n",
"from sirf.contrib.partitioner import partitioner\n",
"\n",
"from cil.optimisation.functions import SGFunction\n",
"from cil.optimisation.algorithms import GD\n",
"from cil.optimisation.utilities import Sampler, ConstantStepSize\n",
"from cil.optimisation.utilities.callbacks import ProgressCallback\n",
"\n",
"from img_quality_cil_stir import ImageQualityCallback\n",
"\n",
"\n",
"# engine's messages go to files, except error messages, which go to stdout\n",
"_ = pet.MessageRedirector('info.txt', 'warn.txt')\n",
"# Needed for get_subsets()\n",
"pet.AcquisitionData.set_storage_scheme('memory')\n",
"# fewer message from STIR and SIRF\n",
"pet.set_verbosity(0)\n",
"\n",
"def initial_OSEM(acquired_data, additive_term, mult_factors, initial_image):\n",
" num_subsets = 1\n",
" data, acq_models, obj_funs = partitioner.data_partition(acquired_data, additive_term, mult_factors, num_subsets)\n",
"\n",
" obj_fun = pet.make_Poisson_loglikelihood(data[0])\n",
" obj_fun.set_acquisition_model(acq_models[0])\n",
" recon = pet.OSMAPOSLReconstructor()\n",
" recon.set_objective_function(obj_fun)\n",
" recon.set_current_estimate(initial_image)\n",
" # some arbitrary numbers here\n",
" recon.set_num_subsets(2)\n",
" num_subiters = 14\n",
" recon.set_num_subiterations(num_subiters)\n",
" recon.set_up(initial_image)\n",
" recon.process()\n",
" return recon.get_output()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fc7ea384-a57a-42a6-8f35-4fe9826279e6",
"metadata": {},
"outputs": [],
"source": [
"def construct_RDP(penalty_strength, initial_image, kappa, max_scaling=1e-3):\n",
" '''\n",
" Construct the Relative Difference Prior (RDP)\n",
" \n",
" WARNING: return prior with beta/num_subsets (as currently needed for BSREM implementations)\n",
" '''\n",
" prior = pet.RelativeDifferencePrior()\n",
" # need to make it differentiable\n",
" epsilon = initial_image.max() * max_scaling\n",
" prior.set_epsilon(epsilon)\n",
" prior.set_penalisation_factor(penalty_strength)\n",
" prior.set_kappa(kappa)\n",
" prior.set_up(initial_image)\n",
" return prior\n",
" \n",
"def add_prior(prior, objective_functions):\n",
" '''Add prior evenly to every objective function.\n",
" \n",
" WARNING: it modifies the objective functions'''\n",
" for f in objective_functions:\n",
" f.set_prior(prior) "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6b5020bc-a42c-455f-89de-dbedc928dffb",
"metadata": {},
"outputs": [],
"source": [
"# https://github.com/SyneRBI/PETRIC/blob/recon_with_metrics/metrics/NEMA-IQ-CIL.ipynb\n",
"import tensorboardX\n",
"from datetime import datetime\n",
"import numpy as np\n",
"# create a tensorboardX summary writer\n",
"dt_string = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n",
"tb_summary_writer = tensorboardX.SummaryWriter(f'recons/exp-{dt_string}')\n",
"def MSE(x,y):\n",
" \"\"\" mean squared error between two numpy arrays\n",
" \"\"\"\n",
" return ((x-y)**2).mean()\n",
"\n",
"def MAE(x,y):\n",
" \"\"\" mean absolute error between two numpy arrays\n",
" \"\"\"\n",
" return np.abs(x-y).mean()\n",
"\n",
"def PSNR(x, y, scale = None):\n",
" \"\"\" peak signal to noise ratio between two numpy arrays x and y\n",
" y is considered to be the reference array and the default scale\n",
" needed for the PSNR is assumed to be the max of this array\n",
" \"\"\"\n",
" \n",
" mse = ((x-y)**2).mean()\n",
" \n",
" if scale == None:\n",
" scale = y.max()\n",
" \n",
" return 10*np.log10((scale**2) / mse)\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e2002475-9d52-4795-b9f3-61bec44eb748",
"metadata": {},
"outputs": [],
"source": [
"os.chdir('/home/jovyan/work/Challenge24/data')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fe04a664-e85b-4a1b-a299-3812d8d22c6b",
"metadata": {},
"outputs": [],
"source": [
"acquired_data = pet.AcquisitionData('prompts.hs')\n",
"\n",
"additive_term = pet.AcquisitionData('additive.hs')\n",
"\n",
"mult_factors = pet.AcquisitionData('multfactors.hs')\n",
"\n",
"initial_image = pet.ImageData('OSEM_image.hv')\n",
"osem_sol = initial_image\n",
"# This should be an image to give voxel-dependent weights \n",
"# (here predetermined as the row-sum of the Hessian of the log-likelihood at an initial OSEM reconstruction, see eq. 25 in [7])\n",
"kappa = initial_image.allocate(1.)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8e53cf71-21b7-47e4-97c9-8c98ec045b59",
"metadata": {},
"outputs": [],
"source": [
"# load the ROIs\n",
"\n",
"ground_truth = initial_image\n",
"roi_image_dict = {\n",
" 'S1': pet.ImageData('S1.hv'),\n",
" 'S2': pet.ImageData('S2.hv'),\n",
" 'S3': pet.ImageData('S3.hv'),\n",
" 'S4': pet.ImageData('S4.hv'),\n",
" 'S5': pet.ImageData('S5.hv'),\n",
" 'S6': pet.ImageData('S6.hv'),\n",
" 'S7': pet.ImageData('S7.hv'),\n",
"}\n",
"# instantiate ImageQualityCallback\n",
"img_qual_callback = ImageQualityCallback(ground_truth, tb_summary_writer,\n",
" roi_mask_dict = roi_image_dict,\n",
" metrics_dict = {'MSE':MSE, \n",
" 'MAE':MAE, \n",
" 'PSNR':PSNR},\n",
" statistics_dict = {'MEAN': (lambda x: x.mean()),\n",
" 'STDDEV': (lambda x: x.std()),\n",
" 'MAX': (lambda x: x.max()),\n",
" 'COM': (lambda x: np.array([3,2,1]))},\n",
" )\n"
]
},
{
"cell_type": "markdown",
"id": "1613c02e-6475-4d3b-9ffb-82bc703ed115",
"metadata": {},
"source": [
"## Using SIRF Objective Functions"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "38959d1d-4694-4369-b317-2dc8088216e0",
"metadata": {},
"outputs": [],
"source": [
"num_subsets = 7\n",
"data, acq_models, obj_funs = partitioner.data_partition(acquired_data, additive_term, mult_factors, num_subsets, mode='staggered', initial_image=initial_image)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ba81c1b4-3714-4c34-89d6-e5a745ec9470",
"metadata": {},
"outputs": [],
"source": [
"\n",
"# add RDP prior to the objective functions\n",
"step_size = 1e-7\n",
"add_regulariser = True\n",
"if add_regulariser:\n",
" alpha = 500\n",
" prior = construct_RDP(alpha, initial_image, kappa)\n",
" # epsilon = initial_image.max()*1e-4\n",
" # prior = add_RDP(alpha, epsilon, obj_funs)\n",
" add_prior(prior, obj_funs)\n",
" step_size = 1e-10"
]
Review comment on lines +205 to +215 (Member): will have to be replaced after merging #20
},
{
"cell_type": "code",
"execution_count": null,
"id": "69b2fde4-7d03-4342-9ec7-3ed0360604b0",
"metadata": {},
"outputs": [],
"source": [
"#set up and run the gradient descent algorithm\n",
"\n",
"sampler = Sampler.random_without_replacement(len(obj_funs))\n",
"# requires a minus sign for CIL's algorithm as they are minimisers\n",
"F = - SGFunction(obj_funs, sampler=sampler)\n",
"# ISTA default step_size is 0.99*2.0/F.L\n",
"step_size_rule = ConstantStepSize(step_size)\n",
"\n",
"alg = GD(initial=initial_image, objective_function=F, step_size=step_size_rule)"
Review comment (Member): Not using ISTA here, but GD. Please change and add positivity constraint

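A minimal sketch of the requested change, assuming CIL's ISTA algorithm and IndicatorBox function (illustrative only, not code from this PR):

from cil.optimisation.algorithms import ISTA
from cil.optimisation.functions import IndicatorBox

# g is the indicator of the non-negative orthant; ISTA applies its proximal
# map after each gradient step, clipping negative voxel values to zero
alg = ISTA(initial=initial_image, f=F, g=IndicatorBox(lower=0),
           step_size=step_size)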
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "26477d50-ac4e-4f11-af08-b873bb417597",
"metadata": {},
"outputs": [],
"source": [
"alg.run(10, callbacks=[img_qual_callback, ProgressCallback()])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0dded5f8-3bae-46aa-868f-452e0e5bd763",
"metadata": {},
"outputs": [],
"source": [
"from cil.utilities.display import show2D \n",
"cmax = .15\n",
"im_slice = 70\n",
"osem_sol = initial_image\n",
"show2D([osem_sol.as_array()[im_slice,:,:], \n",
" alg.solution.as_array()[im_slice,:,:]], \n",
" title=['OSEM',f\"{alg.__class__.__name__} epoch {alg.iteration/num_subsets}\"], \n",
" cmap=\"PuRd\", fix_range=[(0, 0.2),(0,0.2)], origin='upper-left')"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
}
},
"nbformat": 4,
"nbformat_minor": 5
}