From 2ad07a3ddf33d0d731a3d2c9fed1300d95b64a6c Mon Sep 17 00:00:00 2001 From: karp2601 Date: Mon, 13 Nov 2023 10:27:46 -0500 Subject: [PATCH 01/10] Moving functions around and renaming modules. --- scilpy/dwi/operations.py | 103 +++ scilpy/dwi/utils.py | 67 -- scilpy/reconst/{divide_fit.py => divide.py} | 108 ++- scilpy/reconst/{raw_signal.py => dwi.py} | 37 +- scilpy/reconst/fodf.py | 85 ++- scilpy/reconst/multi_processes.py | 689 -------------------- scilpy/reconst/sh.py | 513 ++++++++++++++- 7 files changed, 802 insertions(+), 800 deletions(-) create mode 100644 scilpy/dwi/operations.py rename scilpy/reconst/{divide_fit.py => divide.py} (68%) rename scilpy/reconst/{raw_signal.py => dwi.py} (70%) delete mode 100644 scilpy/reconst/multi_processes.py diff --git a/scilpy/dwi/operations.py b/scilpy/dwi/operations.py new file mode 100644 index 000000000..3e8a5001f --- /dev/null +++ b/scilpy/dwi/operations.py @@ -0,0 +1,103 @@ +import numpy as np + + +def _rescale_intensity(val, slope, in_max, bc_max): + """ + Rescale an intensity value given a scaling factor. + This scaling factor ensures that the intensity + range before and after correction is the same. + + Parameters + ---------- + val: float + Value to be scaled + scale: float + Scaling factor to be applied + in_max: float + Max possible value + bc_max: float + Max value in the bias correction value range + + Returns + ------- + rescaled_value: float + Bias field corrected value scaled by the slope + of the data + """ + + return in_max - slope * (bc_max - val) + + +# https://github.com/stnava/ANTs/blob/master/Examples/N4BiasFieldCorrection.cxx +def rescale_dwi(in_data, bc_data): + """ + Apply N4 Bias Field Correction to a DWI volume. + bc stands for bias correction. The code comes + from the C++ ANTS implmentation. + + Parameters + ---------- + in_data: ndarray (x, y, z, ndwi) + Input DWI volume 4-dimensional data. + bc_data: ndarray (x, y, z, ndwi) + Bias field correction volume estimated from ANTS + Copied for every dimension of the DWI 4-th dimension + + Returns + ------- + bc_data: ndarray (x, y, z, ndwi) + Bias field corrected DWI volume + """ + + in_min = np.amin(in_data) + in_max = np.amax(in_data) + bc_min = np.amin(bc_data) + bc_max = np.amax(bc_data) + + slope = (in_max - in_min) / (bc_max - bc_min) + + chunk = np.arange(0, len(in_data), 100000) + chunk = np.append(chunk, len(in_data)) + for i in range(len(chunk)-1): + nz_bc_data = bc_data[chunk[i]:chunk[i+1]] + rescale_func = np.vectorize(_rescale_intensity, otypes=[np.float32]) + + rescaled_data = rescale_func(nz_bc_data, slope, in_max, bc_max) + bc_data[chunk[i]:chunk[i+1]] = rescaled_data + + return bc_data + + +def compute_dwi_attenuation(dwi_weights: np.ndarray, b0: np.ndarray): + """ Compute signal attenuation by dividing the dwi signal with the b0. + + Parameters: + ----------- + dwi_weights : np.ndarray of shape (X, Y, Z, #gradients) + Diffusion weighted images. + b0 : np.ndarray of shape (X, Y, Z) + B0 image. + + Returns + ------- + dwi_attenuation : np.ndarray + Signal attenuation (Diffusion weights normalized by the B0). + """ + b0 = b0[..., None] # Easier to work if it is a 4D array. + + # Make sure that, in every voxels, weights are lower in the b0. Should + # always be the case, but with the noise we never know! 
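+    # Below, voxels where any weighted signal exceeds its b0 are counted and
+    # reported, then clamped with np.minimum, so the attenuation computed
+    # afterwards is guaranteed to lie in [0, 1] wherever b0 > 0.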
+ erroneous_voxels = np.any(dwi_weights > b0, axis=3) + nb_erroneous_voxels = np.sum(erroneous_voxels) + if nb_erroneous_voxels != 0: + logging.info("# of voxels where `dwi_signal > b0` in any direction: " + "{}".format(nb_erroneous_voxels)) + dwi_weights = np.minimum(dwi_weights, b0) + + # Compute attenuation + dwi_attenuation = dwi_weights / b0 + + # Make sure we didn't divide by 0. + dwi_attenuation[np.logical_not(np.isfinite(dwi_attenuation))] = 0. + + return dwi_attenuation diff --git a/scilpy/dwi/utils.py b/scilpy/dwi/utils.py index 34f80263f..7285e0fc3 100644 --- a/scilpy/dwi/utils.py +++ b/scilpy/dwi/utils.py @@ -7,73 +7,6 @@ from scilpy.image.utils import volume_iterator -def _rescale_intensity(val, slope, in_max, bc_max): - """ - Rescale an intensity value given a scaling factor. - This scaling factor ensures that the intensity - range before and after correction is the same. - - Parameters - ---------- - val: float - Value to be scaled - scale: float - Scaling factor to be applied - in_max: float - Max possible value - bc_max: float - Max value in the bias correction value range - - Returns - ------- - rescaled_value: float - Bias field corrected value scaled by the slope - of the data - """ - - return in_max - slope * (bc_max - val) - - -# https://github.com/stnava/ANTs/blob/master/Examples/N4BiasFieldCorrection.cxx -def rescale_dwi(in_data, bc_data): - """ - Apply N4 Bias Field Correction to a DWI volume. - bc stands for bias correction. The code comes - from the C++ ANTS implmentation. - - Parameters - ---------- - in_data: ndarray (x, y, z, ndwi) - Input DWI volume 4-dimensional data. - bc_data: ndarray (x, y, z, ndwi) - Bias field correction volume estimated from ANTS - Copied for every dimension of the DWI 4-th dimension - - Returns - ------- - bc_data: ndarray (x, y, z, ndwi) - Bias field corrected DWI volume - """ - - in_min = np.amin(in_data) - in_max = np.amax(in_data) - bc_min = np.amin(bc_data) - bc_max = np.amax(bc_data) - - slope = (in_max - in_min) / (bc_max - bc_min) - - chunk = np.arange(0, len(in_data), 100000) - chunk = np.append(chunk, len(in_data)) - for i in range(len(chunk)-1): - nz_bc_data = bc_data[chunk[i]:chunk[i+1]] - rescale_func = np.vectorize(_rescale_intensity, otypes=[np.float32]) - - rescaled_data = rescale_func(nz_bc_data, slope, in_max, bc_max) - bc_data[chunk[i]:chunk[i+1]] = rescaled_data - - return bc_data - - def extract_dwi_shell(dwi, bvals, bvecs, bvals_to_extract, tol=20, block_size=None): """Extracts the DWI volumes that are on specific b-value shells. 
Many diff --git a/scilpy/reconst/divide_fit.py b/scilpy/reconst/divide.py similarity index 68% rename from scilpy/reconst/divide_fit.py rename to scilpy/reconst/divide.py index e4958feb3..d834a1a3e 100644 --- a/scilpy/reconst/divide_fit.py +++ b/scilpy/reconst/divide.py @@ -1,9 +1,12 @@ +# -*- coding: utf-8 -*- +import itertools +import multiprocessing import numpy as np from scipy.optimize import curve_fit from scipy.special import erf -def get_bounds(): +def _get_bounds(): """Define the lower (lb) and upper (ub) boundaries of the fitting parameters, being the signal without diffusion weighting (S0), the mean diffusivity (MD), the isotropic variance (V_I) and the anisotropic variance @@ -25,7 +28,7 @@ def get_bounds(): return lb, ub -def random_p0(signal, gtab_infos, lb, ub, weight, n_iter): +def _random_p0(signal, gtab_infos, lb, ub, weight, n_iter): """Produce a guess of initial parameters for the fit, by calculating the signals of a given number of random sets of parameters and keeping the one closest to the input signal. @@ -130,7 +133,7 @@ def my_gamma_fit2data(gtab_infos, *args): signal = gamma_fit2data(gtab_infos, params_SI) return signal * weight - lb_SI, ub_SI = get_bounds() + lb_SI, ub_SI = _get_bounds() lb_SI = np.concatenate((lb_SI, 0.5 * np.ones(ns))) ub_SI = np.concatenate((ub_SI, 2.0 * np.ones(ns))) lb_SI[0] *= np.max(signal) @@ -150,8 +153,8 @@ def my_gamma_fit2data(gtab_infos, *args): if do_weight_pa: weight *= weight_pa() - p0_SI = random_p0(signal, gtab_infos, lb_SI, ub_SI, weight, - random_iters) + p0_SI = _random_p0(signal, gtab_infos, lb_SI, ub_SI, weight, + random_iters) p0_unit = p0_SI / unit_to_SI params_unit, params_cov = curve_fit(my_gamma_fit2data, gtab_infos, signal * weight, p0=p0_unit, @@ -257,3 +260,98 @@ def gamma_fit2metrics(params): microFA[np.isnan(microFA)] = 0 return microFA, MK_I, MK_A, MK_T + + +def _fit_gamma_parallel(args): + data = args[0] + gtab_infos = args[1] + fit_iters = args[2] + random_iters = args[3] + do_weight_bvals = args[4] + do_weight_pa = args[5] + do_multiple_s0 = args[6] + chunk_id = args[7] + + sub_fit_array = np.zeros((data.shape[0], 4)) + for i in range(data.shape[0]): + if data[i].any(): + sub_fit_array[i] = gamma_data2fit(data[i], gtab_infos, fit_iters, + random_iters, do_weight_bvals, + do_weight_pa, do_multiple_s0) + + return chunk_id, sub_fit_array + + +def fit_gamma(data, gtab_infos, mask=None, fit_iters=1, random_iters=50, + do_weight_bvals=False, do_weight_pa=False, do_multiple_s0=False, + nbr_processes=None): + """Fit the gamma model to data + + Parameters + ---------- + data : np.ndarray (4d) + Diffusion data, powder averaged. Obtained as output of the function + `reconst.b_tensor_utils.generate_powder_averaged_data`. + gtab_infos : np.ndarray + Contains information about the gtab, such as the unique bvals, the + encoding types, the number of directions and the acquisition index. + Obtained as output of the function + `reconst.b_tensor_utils.generate_powder_averaged_data`. + mask : np.ndarray, optional + If `mask` is provided, only the data inside the mask will be + used for computations. + fit_iters : int, optional + Number of iterations in the gamma fit. Defaults to 1. + random_iters : int, optional + Number of random sets of parameters tested to find the initial + parameters. Defaults to 50. + do_weight_bvals : bool , optional + If set, does a weighting on the bvalues in the gamma fit. + do_weight_pa : bool, optional + If set, does a powder averaging weighting in the gamma fit. 
+ do_multiple_s0 : bool, optional + If set, takes into account multiple baseline signals. + nbr_processes : int, optional + The number of subprocesses to use. + Default: multiprocessing.cpu_count() + + Returns + ------- + fit_array : np.ndarray + Array containing the fit + """ + data_shape = data.shape + if mask is None: + mask = np.sum(data, axis=3).astype(bool) + + nbr_processes = multiprocessing.cpu_count() if nbr_processes is None \ + or nbr_processes <= 0 else nbr_processes + + # Ravel the first 3 dimensions while keeping the 4th intact, like a list of + # 1D time series voxels. Then separate it in chunks of len(nbr_processes). + data = data[mask].reshape((np.count_nonzero(mask), data_shape[3])) + chunks = np.array_split(data, nbr_processes) + + chunk_len = np.cumsum([0] + [len(c) for c in chunks]) + pool = multiprocessing.Pool(nbr_processes) + results = pool.map(_fit_gamma_parallel, + zip(chunks, + itertools.repeat(gtab_infos), + itertools.repeat(fit_iters), + itertools.repeat(random_iters), + itertools.repeat(do_weight_bvals), + itertools.repeat(do_weight_pa), + itertools.repeat(do_multiple_s0), + np.arange(len(chunks)))) + pool.close() + pool.join() + + # Re-assemble the chunk together in the original shape. + fit_array = np.zeros((data_shape[0:3])+(4,)) + tmp_fit_array = np.zeros((np.count_nonzero(mask), 4)) + for i, fit in results: + tmp_fit_array[chunk_len[i]:chunk_len[i+1]] = fit + + fit_array[mask] = tmp_fit_array + + return fit_array diff --git a/scilpy/reconst/raw_signal.py b/scilpy/reconst/dwi.py similarity index 70% rename from scilpy/reconst/raw_signal.py rename to scilpy/reconst/dwi.py index 841eec929..6c888fbfd 100644 --- a/scilpy/reconst/raw_signal.py +++ b/scilpy/reconst/dwi.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - import logging from dipy.core.sphere import Sphere @@ -8,6 +7,7 @@ from scilpy.gradients.bvec_bval_tools import (check_b0_threshold, identify_shells, is_normalized_bvecs, normalize_bvecs) +from scilpy.dwi.operations import compute_dwi_attenuation def compute_sh_coefficients(dwi, gradient_table, sh_order=4, @@ -84,38 +84,3 @@ def compute_sh_coefficients(dwi, gradient_table, sh_order=4, sh *= mask[..., None] return sh - - -def compute_dwi_attenuation(dwi_weights: np.ndarray, b0: np.ndarray): - """ Compute signal attenuation by dividing the dwi signal with the b0. - - Parameters: - ----------- - dwi_weights : np.ndarray of shape (X, Y, Z, #gradients) - Diffusion weighted images. - b0 : np.ndarray of shape (X, Y, Z) - B0 image. - - Returns - ------- - dwi_attenuation : np.ndarray - Signal attenuation (Diffusion weights normalized by the B0). - """ - b0 = b0[..., None] # Easier to work if it is a 4D array. - - # Make sure that, in every voxels, weights are lower in the b0. Should - # always be the case, but with the noise we never know! - erroneous_voxels = np.any(dwi_weights > b0, axis=3) - nb_erroneous_voxels = np.sum(erroneous_voxels) - if nb_erroneous_voxels != 0: - logging.info("# of voxels where `dwi_signal > b0` in any direction: " - "{}".format(nb_erroneous_voxels)) - dwi_weights = np.minimum(dwi_weights, b0) - - # Compute attenuation - dwi_attenuation = dwi_weights / b0 - - # Make sure we didn't divide by 0. - dwi_attenuation[np.logical_not(np.isfinite(dwi_attenuation))] = 0. 
- - return dwi_attenuation diff --git a/scilpy/reconst/fodf.py b/scilpy/reconst/fodf.py index 4777c8b27..3cb1b614e 100644 --- a/scilpy/reconst/fodf.py +++ b/scilpy/reconst/fodf.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- - +import itertools import logging +import multiprocessing +import numpy as np from dipy.data import get_sphere -import numpy as np +from dipy.reconst.mcsd import MSDeconvFit +from dipy.reconst.multi_voxel import MultiVoxelFit from scilpy.reconst.utils import find_order_from_nb_coeff, get_b_matrix +from dipy.utils.optpkg import optional_package +cvx, have_cvxpy, _ = optional_package("cvxpy") + def get_ventricles_max_fodf(data, fa, md, zoom, args): """ @@ -93,3 +99,78 @@ def get_ventricles_max_fodf(data, fa, md, zoom, args): logging.debug('Average max fodf value: {}'.format(sum_of_max / count)) return sum_of_max / count, mask + + +def _fit_from_model_parallel(args): + model = args[0] + data = args[1] + chunk_id = args[2] + + sub_fit_array = np.zeros((data.shape[0],), dtype='object') + for i in range(data.shape[0]): + if data[i].any(): + try: + sub_fit_array[i] = model.fit(data[i]) + except cvx.error.SolverError: + coeff = np.full((len(model.n)), np.NaN) + sub_fit_array[i] = MSDeconvFit(model, coeff, None) + + return chunk_id, sub_fit_array + + +def fit_from_model(model, data, mask=None, nbr_processes=None): + """Fit the model to data + + Parameters + ---------- + model : a model instance + `model` will be used to fit the data. + data : np.ndarray (4d) + Diffusion data. + mask : np.ndarray, optional + If `mask` is provided, only the data inside the mask will be + used for computations. + nbr_processes : int, optional + The number of subprocesses to use. + Default: multiprocessing.cpu_count() + + Returns + ------- + fit_array : np.ndarray + Array containing the fit + """ + data_shape = data.shape + if mask is None: + mask = np.sum(data, axis=3).astype(bool) + else: + mask_any = np.sum(data, axis=3).astype(bool) + mask *= mask_any + + nbr_processes = multiprocessing.cpu_count() \ + if nbr_processes is None or nbr_processes <= 0 \ + else nbr_processes + + # Ravel the first 3 dimensions while keeping the 4th intact, like a list of + # 1D time series voxels. Then separate it in chunks of len(nbr_processes). + data = data[mask].reshape((np.count_nonzero(mask), data_shape[3])) + chunks = np.array_split(data, nbr_processes) + + chunk_len = np.cumsum([0] + [len(c) for c in chunks]) + pool = multiprocessing.Pool(nbr_processes) + results = pool.map(_fit_from_model_parallel, + zip(itertools.repeat(model), + chunks, + np.arange(len(chunks)))) + pool.close() + pool.join() + + # Re-assemble the chunk together in the original shape. 
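+    # chunk_len holds the cumulative start offset of each chunk, so every
+    # worker's result is written back to its own slice of the masked voxel
+    # axis before being scattered into the full 3D volume through the mask.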
+ fit_array = np.zeros(data_shape[0:3], dtype='object') + tmp_fit_array = np.zeros((np.count_nonzero(mask)), dtype='object') + for i, fit in results: + tmp_fit_array[chunk_len[i]:chunk_len[i+1]] = fit + + fit_array[mask] = tmp_fit_array + fit_array = MultiVoxelFit(model, fit_array, mask) + + return fit_array diff --git a/scilpy/reconst/multi_processes.py b/scilpy/reconst/multi_processes.py deleted file mode 100644 index b22f42c5e..000000000 --- a/scilpy/reconst/multi_processes.py +++ /dev/null @@ -1,689 +0,0 @@ -import itertools -import logging -import multiprocessing - -from dipy.direction.peaks import peak_directions -from dipy.reconst.multi_voxel import MultiVoxelFit -from dipy.reconst.odf import gfa -from dipy.reconst.shm import sh_to_sf_matrix, order_from_ncoef -from dipy.reconst.mcsd import MSDeconvFit -import numpy as np - -from scilpy.reconst.divide_fit import gamma_data2fit - -from dipy.utils.optpkg import optional_package -cvx, have_cvxpy, _ = optional_package("cvxpy") - - -def fit_from_model_parallel(args): - model = args[0] - data = args[1] - chunk_id = args[2] - - sub_fit_array = np.zeros((data.shape[0],), dtype='object') - for i in range(data.shape[0]): - if data[i].any(): - try: - sub_fit_array[i] = model.fit(data[i]) - except cvx.error.SolverError: - coeff = np.full((len(model.n)), np.NaN) - sub_fit_array[i] = MSDeconvFit(model, coeff, None) - - return chunk_id, sub_fit_array - - -def fit_from_model(model, data, mask=None, nbr_processes=None): - """Fit the model to data - - Parameters - ---------- - model : a model instance - `model` will be used to fit the data. - data : np.ndarray (4d) - Diffusion data. - mask : np.ndarray, optional - If `mask` is provided, only the data inside the mask will be - used for computations. - nbr_processes : int, optional - The number of subprocesses to use. - Default: multiprocessing.cpu_count() - - Returns - ------- - fit_array : np.ndarray - Array containing the fit - """ - data_shape = data.shape - if mask is None: - mask = np.sum(data, axis=3).astype(bool) - else: - mask_any = np.sum(data, axis=3).astype(bool) - mask *= mask_any - - nbr_processes = multiprocessing.cpu_count() \ - if nbr_processes is None or nbr_processes <= 0 \ - else nbr_processes - - # Ravel the first 3 dimensions while keeping the 4th intact, like a list of - # 1D time series voxels. Then separate it in chunks of len(nbr_processes). - data = data[mask].reshape((np.count_nonzero(mask), data_shape[3])) - chunks = np.array_split(data, nbr_processes) - - chunk_len = np.cumsum([0] + [len(c) for c in chunks]) - pool = multiprocessing.Pool(nbr_processes) - results = pool.map(fit_from_model_parallel, - zip(itertools.repeat(model), - chunks, - np.arange(len(chunks)))) - pool.close() - pool.join() - - # Re-assemble the chunk together in the original shape. 
- fit_array = np.zeros(data_shape[0:3], dtype='object') - tmp_fit_array = np.zeros((np.count_nonzero(mask)), dtype='object') - for i, fit in results: - tmp_fit_array[chunk_len[i]:chunk_len[i+1]] = fit - - fit_array[mask] = tmp_fit_array - fit_array = MultiVoxelFit(model, fit_array, mask) - - return fit_array - - -def peaks_from_sh_parallel(args): - shm_coeff = args[0] - B = args[1] - sphere = args[2] - relative_peak_threshold = args[3] - absolute_threshold = args[4] - min_separation_angle = args[5] - npeaks = args[6] - normalize_peaks = args[7] - chunk_id = args[8] - is_symmetric = args[9] - - data_shape = shm_coeff.shape[0] - peak_dirs = np.zeros((data_shape, npeaks, 3)) - peak_values = np.zeros((data_shape, npeaks)) - peak_indices = np.zeros((data_shape, npeaks), dtype='int') - peak_indices.fill(-1) - - for idx in range(len(shm_coeff)): - if shm_coeff[idx].any(): - odf = np.dot(shm_coeff[idx], B) - odf[odf < absolute_threshold] = 0. - - dirs, peaks, ind = peak_directions(odf, sphere, - relative_peak_threshold, - min_separation_angle, - is_symmetric) - - if peaks.shape[0] != 0: - n = min(npeaks, peaks.shape[0]) - - peak_dirs[idx][:n] = dirs[:n] - peak_indices[idx][:n] = ind[:n] - peak_values[idx][:n] = peaks[:n] - - if normalize_peaks: - peak_values[idx][:n] /= peaks[0] - peak_dirs[idx] *= peak_values[idx][:, None] - - return chunk_id, peak_dirs, peak_values, peak_indices - - -def peaks_from_sh(shm_coeff, sphere, mask=None, relative_peak_threshold=0.5, - absolute_threshold=0, min_separation_angle=25, - normalize_peaks=False, npeaks=5, - sh_basis_type='descoteaux07', nbr_processes=None, - full_basis=False, is_symmetric=True): - """Computes peaks from given spherical harmonic coefficients - - Parameters - ---------- - shm_coeff : np.ndarray - Spherical harmonic coefficients - sphere : Sphere - The Sphere providing discrete directions for evaluation. - mask : np.ndarray, optional - If `mask` is provided, only the data inside the mask will be - used for computations. - relative_peak_threshold : float, optional - Only return peaks greater than ``relative_peak_threshold * m`` where m - is the largest peak. - Default: 0.5 - absolute_threshold : float, optional - Absolute threshold on fODF amplitude. This value should be set to - approximately 1.5 to 2 times the maximum fODF amplitude in isotropic - voxels (ex. ventricles). `scil_compute_fodf_max_in_ventricles.py` - can be used to find the maximal value. - Default: 0 - min_separation_angle : float in [0, 90], optional - The minimum distance between directions. If two peaks are too close - only the larger of the two is returned. - Default: 25 - normalize_peaks : bool, optional - If true, all peak values are calculated relative to `max(odf)`. - npeaks : int, optional - Maximum number of peaks found (default 5 peaks). - sh_basis_type : str, optional - Type of spherical harmonic basis used for `shm_coeff`. Either - `descoteaux07` or `tournier07`. - Default: `descoteaux07` - nbr_processes: int, optional - The number of subprocesses to use. - Default: multiprocessing.cpu_count() - full_basis: bool, optional - If True, SH coefficients are expressed using a full basis. - Default: False - is_symmetric: bool, optional - If False, antipodal sphere directions are considered distinct. 
- Default: True - - Returns - ------- - tuple of np.ndarray - peak_dirs, peak_values, peak_indices - """ - sh_order = order_from_ncoef(shm_coeff.shape[-1], full_basis) - B, _ = sh_to_sf_matrix(sphere, sh_order, sh_basis_type, full_basis) - - data_shape = shm_coeff.shape - if mask is None: - mask = np.sum(shm_coeff, axis=3).astype(bool) - - nbr_processes = multiprocessing.cpu_count() if nbr_processes is None \ - or nbr_processes < 0 else nbr_processes - - # Ravel the first 3 dimensions while keeping the 4th intact, like a list of - # 1D time series voxels. Then separate it in chunks of len(nbr_processes). - shm_coeff = shm_coeff[mask].reshape( - (np.count_nonzero(mask), data_shape[3])) - chunks = np.array_split(shm_coeff, nbr_processes) - chunk_len = np.cumsum([0] + [len(c) for c in chunks]) - - pool = multiprocessing.Pool(nbr_processes) - results = pool.map(peaks_from_sh_parallel, - zip(chunks, - itertools.repeat(B), - itertools.repeat(sphere), - itertools.repeat(relative_peak_threshold), - itertools.repeat(absolute_threshold), - itertools.repeat(min_separation_angle), - itertools.repeat(npeaks), - itertools.repeat(normalize_peaks), - np.arange(len(chunks)), - itertools.repeat(is_symmetric))) - pool.close() - pool.join() - - # Re-assemble the chunk together in the original shape. - peak_dirs_array = np.zeros(data_shape[0:3] + (npeaks, 3)) - peak_values_array = np.zeros(data_shape[0:3] + (npeaks,)) - peak_indices_array = np.zeros(data_shape[0:3] + (npeaks,)) - - # tmp arrays are neccesary to avoid inserting data in returned variable - # rather than the original array - tmp_peak_dirs_array = np.zeros((np.count_nonzero(mask), npeaks, 3)) - tmp_peak_values_array = np.zeros((np.count_nonzero(mask), npeaks)) - tmp_peak_indices_array = np.zeros((np.count_nonzero(mask), npeaks)) - for i, peak_dirs, peak_values, peak_indices in results: - tmp_peak_dirs_array[chunk_len[i]:chunk_len[i+1], :, :] = peak_dirs - tmp_peak_values_array[chunk_len[i]:chunk_len[i+1], :] = peak_values - tmp_peak_indices_array[chunk_len[i]:chunk_len[i+1], :] = peak_indices - - peak_dirs_array[mask] = tmp_peak_dirs_array - peak_values_array[mask] = tmp_peak_values_array - peak_indices_array[mask] = tmp_peak_indices_array - - return peak_dirs_array, peak_values_array, peak_indices_array - - -def maps_from_sh_parallel(args): - shm_coeff = args[0] - _ = args[1] - peak_values = args[2] - peak_indices = args[3] - B = args[4] - sphere = args[5] - gfa_thr = args[6] - chunk_id = args[7] - - data_shape = shm_coeff.shape[0] - nufo_map = np.zeros(data_shape) - afd_max = np.zeros(data_shape) - afd_sum = np.zeros(data_shape) - rgb_map = np.zeros((data_shape, 3)) - gfa_map = np.zeros(data_shape) - qa_map = np.zeros((data_shape, peak_values.shape[1])) - - max_odf = 0 - global_max = -np.inf - for idx in range(len(shm_coeff)): - if shm_coeff[idx].any(): - odf = np.dot(shm_coeff[idx], B) - odf = odf.clip(min=0) - sum_odf = np.sum(odf) - max_odf = np.maximum(max_odf, sum_odf) - if sum_odf > 0: - rgb_map[idx] = np.dot(np.abs(sphere.vertices).T, odf) - rgb_map[idx] /= np.linalg.norm(rgb_map[idx]) - rgb_map[idx] *= sum_odf - gfa_map[idx] = gfa(odf) - if gfa_map[idx] < gfa_thr: - global_max = max(global_max, odf.max()) - elif np.sum(peak_indices[idx] > -1): - nufo_map[idx] = np.sum(peak_indices[idx] > -1) - afd_max[idx] = peak_values[idx].max() - afd_sum[idx] = np.sqrt(np.dot(shm_coeff[idx], shm_coeff[idx])) - qa_map = peak_values[idx] - odf.min() - global_max = max(global_max, peak_values[idx][0]) - - return chunk_id, nufo_map, afd_max, afd_sum, 
rgb_map, \ - gfa_map, qa_map, max_odf, global_max - - -def maps_from_sh(shm_coeff, peak_dirs, peak_values, peak_indices, sphere, - mask=None, gfa_thr=0, sh_basis_type='descoteaux07', - nbr_processes=None): - """Computes maps from given SH coefficients and peaks - - Parameters - ---------- - shm_coeff : np.ndarray - Spherical harmonic coefficients - peak_dirs : np.ndarray - Peak directions - peak_values : np.ndarray - Peak values - peak_indices : np.ndarray - Peak indices - sphere : Sphere - The Sphere providing discrete directions for evaluation. - mask : np.ndarray, optional - If `mask` is provided, only the data inside the mask will be - used for computations. - gfa_thr : float, optional - Voxels with gfa less than `gfa_thr` are skipped for all metrics, except - `rgb_map`. - Default: 0 - sh_basis_type : str, optional - Type of spherical harmonic basis used for `shm_coeff`. Either - `descoteaux07` or `tournier07`. - Default: `descoteaux07` - nbr_processes: int, optional - The number of subprocesses to use. - Default: multiprocessing.cpu_count() - - Returns - ------- - tuple of np.ndarray - nufo_map, afd_max, afd_sum, rgb_map, gfa, qa - """ - sh_order = order_from_ncoef(shm_coeff.shape[-1]) - B, _ = sh_to_sf_matrix(sphere, sh_order, sh_basis_type) - - data_shape = shm_coeff.shape - if mask is None: - mask = np.sum(shm_coeff, axis=3).astype(bool) - - nbr_processes = multiprocessing.cpu_count() \ - if nbr_processes is None or nbr_processes < 0 \ - else nbr_processes - - npeaks = peak_values.shape[3] - # Ravel the first 3 dimensions while keeping the 4th intact, like a list of - # 1D time series voxels. Then separate it in chunks of len(nbr_processes). - shm_coeff = shm_coeff[mask].reshape( - (np.count_nonzero(mask), data_shape[3])) - peak_dirs = peak_dirs[mask].reshape((np.count_nonzero(mask), npeaks, 3)) - peak_values = peak_values[mask].reshape((np.count_nonzero(mask), npeaks)) - peak_indices = peak_indices[mask].reshape((np.count_nonzero(mask), npeaks)) - shm_coeff_chunks = np.array_split(shm_coeff, nbr_processes) - peak_dirs_chunks = np.array_split(peak_dirs, nbr_processes) - peak_values_chunks = np.array_split(peak_values, nbr_processes) - peak_indices_chunks = np.array_split(peak_indices, nbr_processes) - chunk_len = np.cumsum([0] + [len(c) for c in shm_coeff_chunks]) - - pool = multiprocessing.Pool(nbr_processes) - results = pool.map(maps_from_sh_parallel, - zip(shm_coeff_chunks, - peak_dirs_chunks, - peak_values_chunks, - peak_indices_chunks, - itertools.repeat(B), - itertools.repeat(sphere), - itertools.repeat(gfa_thr), - np.arange(len(shm_coeff_chunks)))) - pool.close() - pool.join() - - # Re-assemble the chunk together in the original shape. 
- nufo_map_array = np.zeros(data_shape[0:3]) - afd_max_array = np.zeros(data_shape[0:3]) - afd_sum_array = np.zeros(data_shape[0:3]) - rgb_map_array = np.zeros(data_shape[0:3] + (3,)) - gfa_map_array = np.zeros(data_shape[0:3]) - qa_map_array = np.zeros(data_shape[0:3] + (npeaks,)) - - # tmp arrays are neccesary to avoid inserting data in returned variable - # rather than the original array - tmp_nufo_map_array = np.zeros((np.count_nonzero(mask))) - tmp_afd_max_array = np.zeros((np.count_nonzero(mask))) - tmp_afd_sum_array = np.zeros((np.count_nonzero(mask))) - tmp_rgb_map_array = np.zeros((np.count_nonzero(mask), 3)) - tmp_gfa_map_array = np.zeros((np.count_nonzero(mask))) - tmp_qa_map_array = np.zeros((np.count_nonzero(mask), npeaks)) - - all_time_max_odf = -np.inf - all_time_global_max = -np.inf - for (i, nufo_map, afd_max, afd_sum, rgb_map, - gfa_map, qa_map, max_odf, global_max) in results: - all_time_max_odf = max(all_time_global_max, max_odf) - all_time_global_max = max(all_time_global_max, global_max) - - tmp_nufo_map_array[chunk_len[i]:chunk_len[i+1]] = nufo_map - tmp_afd_max_array[chunk_len[i]:chunk_len[i+1]] = afd_max - tmp_afd_sum_array[chunk_len[i]:chunk_len[i+1]] = afd_sum - tmp_rgb_map_array[chunk_len[i]:chunk_len[i+1], :] = rgb_map - tmp_gfa_map_array[chunk_len[i]:chunk_len[i+1]] = gfa_map - tmp_qa_map_array[chunk_len[i]:chunk_len[i+1], :] = qa_map - - nufo_map_array[mask] = tmp_nufo_map_array - afd_max_array[mask] = tmp_afd_max_array - afd_sum_array[mask] = tmp_afd_sum_array - rgb_map_array[mask] = tmp_rgb_map_array - gfa_map_array[mask] = tmp_gfa_map_array - qa_map_array[mask] = tmp_qa_map_array - - rgb_map_array /= all_time_max_odf - rgb_map_array *= 255 - qa_map_array /= all_time_global_max - - afd_unique = np.unique(afd_max_array) - if np.array_equal(np.array([0, 1]), afd_unique) \ - or np.array_equal(np.array([1]), afd_unique): - logging.warning('All AFD_max values are 1. The peaks seem normalized.') - - return(nufo_map_array, afd_max_array, afd_sum_array, - rgb_map_array, gfa_map_array, qa_map_array) - - -def convert_sh_basis_parallel(args): - sh = args[0] - B_in = args[1] - invB_out = args[2] - chunk_id = args[3] - - for idx in range(sh.shape[0]): - if sh[idx].any(): - sf = np.dot(sh[idx], B_in) - sh[idx] = np.dot(sf, invB_out) - - return chunk_id, sh - - -def convert_sh_basis(shm_coeff, sphere, mask=None, - input_basis='descoteaux07', nbr_processes=None, - is_input_legacy=True, is_output_legacy=True): - """Converts spherical harmonic coefficients between two bases - - Parameters - ---------- - shm_coeff : np.ndarray - Spherical harmonic coefficients - sphere : Sphere - The Sphere providing discrete directions for evaluation. - mask : np.ndarray, optional - If `mask` is provided, only the data inside the mask will be - used for computations. - input_basis : str, optional - Type of spherical harmonic basis used for `shm_coeff`. Either - `descoteaux07` or `tournier07`. - Default: `descoteaux07` - nbr_processes: int, optional - The number of subprocesses to use. - Default: multiprocessing.cpu_count() - is_input_legacy: bool, optional - If true, this means that the input SH used a legacy basis definition - for backward compatibility with previous ``tournier07`` and - ``descoteaux07`` implementations. - Default: True - is_output_legacy: bool, optional - If true, this means that the output SH will use a legacy basis - definition for backward compatibility with previous ``tournier07`` and - ``descoteaux07`` implementations. 
- Default: True - - Returns - ------- - shm_coeff_array : np.ndarray - Spherical harmonic coefficients in the desired basis. - """ - output_basis = 'descoteaux07' \ - if input_basis == 'tournier07' \ - else 'tournier07' - - sh_order = order_from_ncoef(shm_coeff.shape[-1]) - B_in, _ = sh_to_sf_matrix(sphere, sh_order, input_basis, - legacy=is_input_legacy) - _, invB_out = sh_to_sf_matrix(sphere, sh_order, output_basis, - legacy=is_output_legacy) - - data_shape = shm_coeff.shape - if mask is None: - mask = np.sum(shm_coeff, axis=3).astype(bool) - - nbr_processes = multiprocessing.cpu_count() \ - if nbr_processes is None or nbr_processes < 0 \ - else nbr_processes - - # Ravel the first 3 dimensions while keeping the 4th intact, like a list of - # 1D time series voxels. Then separate it in chunks of len(nbr_processes). - shm_coeff = shm_coeff[mask].reshape( - (np.count_nonzero(mask), data_shape[3])) - shm_coeff_chunks = np.array_split(shm_coeff, nbr_processes) - chunk_len = np.cumsum([0] + [len(c) for c in shm_coeff_chunks]) - - pool = multiprocessing.Pool(nbr_processes) - results = pool.map(convert_sh_basis_parallel, - zip(shm_coeff_chunks, - itertools.repeat(B_in), - itertools.repeat(invB_out), - np.arange(len(shm_coeff_chunks)))) - pool.close() - pool.join() - - # Re-assemble the chunk together in the original shape. - shm_coeff_array = np.zeros(data_shape) - tmp_shm_coeff_array = np.zeros((np.count_nonzero(mask), data_shape[3])) - for i, new_shm_coeff in results: - tmp_shm_coeff_array[chunk_len[i]:chunk_len[i+1], :] = new_shm_coeff - - shm_coeff_array[mask] = tmp_shm_coeff_array - - return shm_coeff_array - - -def convert_sh_to_sf_parallel(args): - sh = args[0] - B_in = args[1] - new_output_dim = args[2] - chunk_id = args[3] - sf = np.zeros((sh.shape[0], new_output_dim), dtype=np.float32) - - for idx in range(sh.shape[0]): - if sh[idx].any(): - sf[idx] = np.dot(sh[idx], B_in) - - return chunk_id, sf - - -def convert_sh_to_sf(shm_coeff, sphere, mask=None, dtype="float32", - input_basis='descoteaux07', input_full_basis=False, - nbr_processes=multiprocessing.cpu_count()): - """Converts spherical harmonic coefficients to an SF sphere - - Parameters - ---------- - shm_coeff : np.ndarray - Spherical harmonic coefficients - sphere : Sphere - The Sphere providing discrete directions for evaluation. - mask : np.ndarray, optional - If `mask` is provided, only the data inside the mask will be - used for computations. - dtype : str - Datatype to use for computation and output array. - Either `float32` or `float64`. Default: `float32` - input_basis : str, optional - Type of spherical harmonic basis used for `shm_coeff`. Either - `descoteaux07` or `tournier07`. - Default: `descoteaux07` - input_full_basis : bool - If True, use a full SH basis (even and odd orders) for the input SH - coefficients. - nbr_processes: int, optional - The number of subprocesses to use. - Default: multiprocessing.cpu_count() - - Returns - ------- - shm_coeff_array : np.ndarray - Spherical harmonic coefficients in the desired basis. - """ - assert dtype in ["float32", "float64"], "Only `float32` and `float64` " \ - "should be used." 
- - sh_order = order_from_ncoef(shm_coeff.shape[-1], - full_basis=input_full_basis) - B_in, _ = sh_to_sf_matrix(sphere, sh_order, basis_type=input_basis, - full_basis=input_full_basis) - B_in = B_in.astype(dtype) - - data_shape = shm_coeff.shape - if mask is None: - mask = np.sum(shm_coeff, axis=3).astype(bool) - - # Ravel the first 3 dimensions while keeping the 4th intact, like a list of - # 1D time series voxels. Then separate it in chunks of len(nbr_processes). - shm_coeff = shm_coeff[mask].reshape( - (np.count_nonzero(mask), data_shape[3])) - shm_coeff_chunks = np.array_split(shm_coeff, nbr_processes) - chunk_len = np.cumsum([0] + [len(c) for c in shm_coeff_chunks]) - - pool = multiprocessing.Pool(nbr_processes) - results = pool.map(convert_sh_to_sf_parallel, - zip(shm_coeff_chunks, - itertools.repeat(B_in), - itertools.repeat(len(sphere.vertices)), - np.arange(len(shm_coeff_chunks)))) - pool.close() - pool.join() - - # Re-assemble the chunk together in the original shape. - new_shape = data_shape[:3] + (len(sphere.vertices),) - sf_array = np.zeros(new_shape, dtype=dtype) - tmp_sf_array = np.zeros((np.count_nonzero(mask), new_shape[3]), - dtype=dtype) - for i, new_sf in results: - tmp_sf_array[chunk_len[i]:chunk_len[i + 1], :] = new_sf - - sf_array[mask] = tmp_sf_array - - return sf_array - - -def fit_gamma_parallel(args): - data = args[0] - gtab_infos = args[1] - fit_iters = args[2] - random_iters = args[3] - do_weight_bvals = args[4] - do_weight_pa = args[5] - do_multiple_s0 = args[6] - chunk_id = args[7] - - sub_fit_array = np.zeros((data.shape[0], 4)) - for i in range(data.shape[0]): - if data[i].any(): - sub_fit_array[i] = gamma_data2fit(data[i], gtab_infos, fit_iters, - random_iters, do_weight_bvals, - do_weight_pa, do_multiple_s0) - - return chunk_id, sub_fit_array - - -def fit_gamma(data, gtab_infos, mask=None, fit_iters=1, random_iters=50, - do_weight_bvals=False, do_weight_pa=False, do_multiple_s0=False, - nbr_processes=None): - """Fit the gamma model to data - - Parameters - ---------- - data : np.ndarray (4d) - Diffusion data, powder averaged. Obtained as output of the function - `reconst.b_tensor_utils.generate_powder_averaged_data`. - gtab_infos : np.ndarray - Contains information about the gtab, such as the unique bvals, the - encoding types, the number of directions and the acquisition index. - Obtained as output of the function - `reconst.b_tensor_utils.generate_powder_averaged_data`. - mask : np.ndarray, optional - If `mask` is provided, only the data inside the mask will be - used for computations. - fit_iters : int, optional - Number of iterations in the gamma fit. Defaults to 1. - random_iters : int, optional - Number of random sets of parameters tested to find the initial - parameters. Defaults to 50. - do_weight_bvals : bool , optional - If set, does a weighting on the bvalues in the gamma fit. - do_weight_pa : bool, optional - If set, does a powder averaging weighting in the gamma fit. - do_multiple_s0 : bool, optional - If set, takes into account multiple baseline signals. - nbr_processes : int, optional - The number of subprocesses to use. 
- Default: multiprocessing.cpu_count() - - Returns - ------- - fit_array : np.ndarray - Array containing the fit - """ - data_shape = data.shape - if mask is None: - mask = np.sum(data, axis=3).astype(bool) - - nbr_processes = multiprocessing.cpu_count() if nbr_processes is None \ - or nbr_processes <= 0 else nbr_processes - - # Ravel the first 3 dimensions while keeping the 4th intact, like a list of - # 1D time series voxels. Then separate it in chunks of len(nbr_processes). - data = data[mask].reshape((np.count_nonzero(mask), data_shape[3])) - chunks = np.array_split(data, nbr_processes) - - chunk_len = np.cumsum([0] + [len(c) for c in chunks]) - pool = multiprocessing.Pool(nbr_processes) - results = pool.map(fit_gamma_parallel, - zip(chunks, - itertools.repeat(gtab_infos), - itertools.repeat(fit_iters), - itertools.repeat(random_iters), - itertools.repeat(do_weight_bvals), - itertools.repeat(do_weight_pa), - itertools.repeat(do_multiple_s0), - np.arange(len(chunks)))) - pool.close() - pool.join() - - # Re-assemble the chunk together in the original shape. - fit_array = np.zeros((data_shape[0:3])+(4,)) - tmp_fit_array = np.zeros((np.count_nonzero(mask), 4)) - for i, fit in results: - tmp_fit_array[chunk_len[i]:chunk_len[i+1]] = fit - - fit_array[mask] = tmp_fit_array - - return fit_array diff --git a/scilpy/reconst/sh.py b/scilpy/reconst/sh.py index de09d2c15..2462e5669 100644 --- a/scilpy/reconst/sh.py +++ b/scilpy/reconst/sh.py @@ -1,6 +1,13 @@ # -*- coding: utf-8 -*- +import itertools +import logging +import multiprocessing import numpy as np -from dipy.reconst.shm import order_from_ncoef, sph_harm_ind_list + +from dipy.direction.peaks import peak_directions +from dipy.reconst.odf import gfa +from dipy.reconst.shm import (sh_to_sf_matrix, order_from_ncoef, + sph_harm_ind_list) def compute_rish(sh, mask=None, full_basis=False): @@ -64,3 +71,507 @@ def compute_rish(sh, mask=None, full_basis=False): orders = sorted(np.unique(order_ids)) return rish, orders + + +def _peaks_from_sh_parallel(args): + shm_coeff = args[0] + B = args[1] + sphere = args[2] + relative_peak_threshold = args[3] + absolute_threshold = args[4] + min_separation_angle = args[5] + npeaks = args[6] + normalize_peaks = args[7] + chunk_id = args[8] + is_symmetric = args[9] + + data_shape = shm_coeff.shape[0] + peak_dirs = np.zeros((data_shape, npeaks, 3)) + peak_values = np.zeros((data_shape, npeaks)) + peak_indices = np.zeros((data_shape, npeaks), dtype='int') + peak_indices.fill(-1) + + for idx in range(len(shm_coeff)): + if shm_coeff[idx].any(): + odf = np.dot(shm_coeff[idx], B) + odf[odf < absolute_threshold] = 0. 
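+            # The two lines above turn the SH coefficients into a discrete
+            # ODF (via the precomputed B matrix) and zero out amplitudes
+            # below the absolute threshold before peaks are extracted.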
+ + dirs, peaks, ind = peak_directions(odf, sphere, + relative_peak_threshold, + min_separation_angle, + is_symmetric) + + if peaks.shape[0] != 0: + n = min(npeaks, peaks.shape[0]) + + peak_dirs[idx][:n] = dirs[:n] + peak_indices[idx][:n] = ind[:n] + peak_values[idx][:n] = peaks[:n] + + if normalize_peaks: + peak_values[idx][:n] /= peaks[0] + peak_dirs[idx] *= peak_values[idx][:, None] + + return chunk_id, peak_dirs, peak_values, peak_indices + + +def peaks_from_sh(shm_coeff, sphere, mask=None, relative_peak_threshold=0.5, + absolute_threshold=0, min_separation_angle=25, + normalize_peaks=False, npeaks=5, + sh_basis_type='descoteaux07', nbr_processes=None, + full_basis=False, is_symmetric=True): + """Computes peaks from given spherical harmonic coefficients + + Parameters + ---------- + shm_coeff : np.ndarray + Spherical harmonic coefficients + sphere : Sphere + The Sphere providing discrete directions for evaluation. + mask : np.ndarray, optional + If `mask` is provided, only the data inside the mask will be + used for computations. + relative_peak_threshold : float, optional + Only return peaks greater than ``relative_peak_threshold * m`` where m + is the largest peak. + Default: 0.5 + absolute_threshold : float, optional + Absolute threshold on fODF amplitude. This value should be set to + approximately 1.5 to 2 times the maximum fODF amplitude in isotropic + voxels (ex. ventricles). `scil_compute_fodf_max_in_ventricles.py` + can be used to find the maximal value. + Default: 0 + min_separation_angle : float in [0, 90], optional + The minimum distance between directions. If two peaks are too close + only the larger of the two is returned. + Default: 25 + normalize_peaks : bool, optional + If true, all peak values are calculated relative to `max(odf)`. + npeaks : int, optional + Maximum number of peaks found (default 5 peaks). + sh_basis_type : str, optional + Type of spherical harmonic basis used for `shm_coeff`. Either + `descoteaux07` or `tournier07`. + Default: `descoteaux07` + nbr_processes: int, optional + The number of subprocesses to use. + Default: multiprocessing.cpu_count() + full_basis: bool, optional + If True, SH coefficients are expressed using a full basis. + Default: False + is_symmetric: bool, optional + If False, antipodal sphere directions are considered distinct. + Default: True + + Returns + ------- + tuple of np.ndarray + peak_dirs, peak_values, peak_indices + """ + sh_order = order_from_ncoef(shm_coeff.shape[-1], full_basis) + B, _ = sh_to_sf_matrix(sphere, sh_order, sh_basis_type, full_basis) + + data_shape = shm_coeff.shape + if mask is None: + mask = np.sum(shm_coeff, axis=3).astype(bool) + + nbr_processes = multiprocessing.cpu_count() if nbr_processes is None \ + or nbr_processes < 0 else nbr_processes + + # Ravel the first 3 dimensions while keeping the 4th intact, like a list of + # 1D time series voxels. Then separate it in chunks of len(nbr_processes). 
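+    # e.g. an (X, Y, Z, n_coeff) SH volume with V voxels inside the mask
+    # becomes a (V, n_coeff) array, which np.array_split then divides into
+    # nbr_processes roughly equal chunks.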
+ shm_coeff = shm_coeff[mask].reshape( + (np.count_nonzero(mask), data_shape[3])) + chunks = np.array_split(shm_coeff, nbr_processes) + chunk_len = np.cumsum([0] + [len(c) for c in chunks]) + + pool = multiprocessing.Pool(nbr_processes) + results = pool.map(_peaks_from_sh_parallel, + zip(chunks, + itertools.repeat(B), + itertools.repeat(sphere), + itertools.repeat(relative_peak_threshold), + itertools.repeat(absolute_threshold), + itertools.repeat(min_separation_angle), + itertools.repeat(npeaks), + itertools.repeat(normalize_peaks), + np.arange(len(chunks)), + itertools.repeat(is_symmetric))) + pool.close() + pool.join() + + # Re-assemble the chunk together in the original shape. + peak_dirs_array = np.zeros(data_shape[0:3] + (npeaks, 3)) + peak_values_array = np.zeros(data_shape[0:3] + (npeaks,)) + peak_indices_array = np.zeros(data_shape[0:3] + (npeaks,)) + + # tmp arrays are neccesary to avoid inserting data in returned variable + # rather than the original array + tmp_peak_dirs_array = np.zeros((np.count_nonzero(mask), npeaks, 3)) + tmp_peak_values_array = np.zeros((np.count_nonzero(mask), npeaks)) + tmp_peak_indices_array = np.zeros((np.count_nonzero(mask), npeaks)) + for i, peak_dirs, peak_values, peak_indices in results: + tmp_peak_dirs_array[chunk_len[i]:chunk_len[i+1], :, :] = peak_dirs + tmp_peak_values_array[chunk_len[i]:chunk_len[i+1], :] = peak_values + tmp_peak_indices_array[chunk_len[i]:chunk_len[i+1], :] = peak_indices + + peak_dirs_array[mask] = tmp_peak_dirs_array + peak_values_array[mask] = tmp_peak_values_array + peak_indices_array[mask] = tmp_peak_indices_array + + return peak_dirs_array, peak_values_array, peak_indices_array + + +def _maps_from_sh_parallel(args): + shm_coeff = args[0] + _ = args[1] + peak_values = args[2] + peak_indices = args[3] + B = args[4] + sphere = args[5] + gfa_thr = args[6] + chunk_id = args[7] + + data_shape = shm_coeff.shape[0] + nufo_map = np.zeros(data_shape) + afd_max = np.zeros(data_shape) + afd_sum = np.zeros(data_shape) + rgb_map = np.zeros((data_shape, 3)) + gfa_map = np.zeros(data_shape) + qa_map = np.zeros((data_shape, peak_values.shape[1])) + + max_odf = 0 + global_max = -np.inf + for idx in range(len(shm_coeff)): + if shm_coeff[idx].any(): + odf = np.dot(shm_coeff[idx], B) + odf = odf.clip(min=0) + sum_odf = np.sum(odf) + max_odf = np.maximum(max_odf, sum_odf) + if sum_odf > 0: + rgb_map[idx] = np.dot(np.abs(sphere.vertices).T, odf) + rgb_map[idx] /= np.linalg.norm(rgb_map[idx]) + rgb_map[idx] *= sum_odf + gfa_map[idx] = gfa(odf) + if gfa_map[idx] < gfa_thr: + global_max = max(global_max, odf.max()) + elif np.sum(peak_indices[idx] > -1): + nufo_map[idx] = np.sum(peak_indices[idx] > -1) + afd_max[idx] = peak_values[idx].max() + afd_sum[idx] = np.sqrt(np.dot(shm_coeff[idx], shm_coeff[idx])) + qa_map = peak_values[idx] - odf.min() + global_max = max(global_max, peak_values[idx][0]) + + return chunk_id, nufo_map, afd_max, afd_sum, rgb_map, \ + gfa_map, qa_map, max_odf, global_max + + +def maps_from_sh(shm_coeff, peak_dirs, peak_values, peak_indices, sphere, + mask=None, gfa_thr=0, sh_basis_type='descoteaux07', + nbr_processes=None): + """Computes maps from given SH coefficients and peaks + + Parameters + ---------- + shm_coeff : np.ndarray + Spherical harmonic coefficients + peak_dirs : np.ndarray + Peak directions + peak_values : np.ndarray + Peak values + peak_indices : np.ndarray + Peak indices + sphere : Sphere + The Sphere providing discrete directions for evaluation. 
+ mask : np.ndarray, optional + If `mask` is provided, only the data inside the mask will be + used for computations. + gfa_thr : float, optional + Voxels with gfa less than `gfa_thr` are skipped for all metrics, except + `rgb_map`. + Default: 0 + sh_basis_type : str, optional + Type of spherical harmonic basis used for `shm_coeff`. Either + `descoteaux07` or `tournier07`. + Default: `descoteaux07` + nbr_processes: int, optional + The number of subprocesses to use. + Default: multiprocessing.cpu_count() + + Returns + ------- + tuple of np.ndarray + nufo_map, afd_max, afd_sum, rgb_map, gfa, qa + """ + sh_order = order_from_ncoef(shm_coeff.shape[-1]) + B, _ = sh_to_sf_matrix(sphere, sh_order, sh_basis_type) + + data_shape = shm_coeff.shape + if mask is None: + mask = np.sum(shm_coeff, axis=3).astype(bool) + + nbr_processes = multiprocessing.cpu_count() \ + if nbr_processes is None or nbr_processes < 0 \ + else nbr_processes + + npeaks = peak_values.shape[3] + # Ravel the first 3 dimensions while keeping the 4th intact, like a list of + # 1D time series voxels. Then separate it in chunks of len(nbr_processes). + shm_coeff = shm_coeff[mask].reshape( + (np.count_nonzero(mask), data_shape[3])) + peak_dirs = peak_dirs[mask].reshape((np.count_nonzero(mask), npeaks, 3)) + peak_values = peak_values[mask].reshape((np.count_nonzero(mask), npeaks)) + peak_indices = peak_indices[mask].reshape((np.count_nonzero(mask), npeaks)) + shm_coeff_chunks = np.array_split(shm_coeff, nbr_processes) + peak_dirs_chunks = np.array_split(peak_dirs, nbr_processes) + peak_values_chunks = np.array_split(peak_values, nbr_processes) + peak_indices_chunks = np.array_split(peak_indices, nbr_processes) + chunk_len = np.cumsum([0] + [len(c) for c in shm_coeff_chunks]) + + pool = multiprocessing.Pool(nbr_processes) + results = pool.map(_maps_from_sh_parallel, + zip(shm_coeff_chunks, + peak_dirs_chunks, + peak_values_chunks, + peak_indices_chunks, + itertools.repeat(B), + itertools.repeat(sphere), + itertools.repeat(gfa_thr), + np.arange(len(shm_coeff_chunks)))) + pool.close() + pool.join() + + # Re-assemble the chunk together in the original shape. 
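+    # Full-size output maps are allocated below; per-chunk results first
+    # land in the tmp_* arrays (indexed through chunk_len) and are only
+    # scattered back into the volumes through the boolean mask once all
+    # workers have returned.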
+ nufo_map_array = np.zeros(data_shape[0:3]) + afd_max_array = np.zeros(data_shape[0:3]) + afd_sum_array = np.zeros(data_shape[0:3]) + rgb_map_array = np.zeros(data_shape[0:3] + (3,)) + gfa_map_array = np.zeros(data_shape[0:3]) + qa_map_array = np.zeros(data_shape[0:3] + (npeaks,)) + + # tmp arrays are neccesary to avoid inserting data in returned variable + # rather than the original array + tmp_nufo_map_array = np.zeros((np.count_nonzero(mask))) + tmp_afd_max_array = np.zeros((np.count_nonzero(mask))) + tmp_afd_sum_array = np.zeros((np.count_nonzero(mask))) + tmp_rgb_map_array = np.zeros((np.count_nonzero(mask), 3)) + tmp_gfa_map_array = np.zeros((np.count_nonzero(mask))) + tmp_qa_map_array = np.zeros((np.count_nonzero(mask), npeaks)) + + all_time_max_odf = -np.inf + all_time_global_max = -np.inf + for (i, nufo_map, afd_max, afd_sum, rgb_map, + gfa_map, qa_map, max_odf, global_max) in results: + all_time_max_odf = max(all_time_global_max, max_odf) + all_time_global_max = max(all_time_global_max, global_max) + + tmp_nufo_map_array[chunk_len[i]:chunk_len[i+1]] = nufo_map + tmp_afd_max_array[chunk_len[i]:chunk_len[i+1]] = afd_max + tmp_afd_sum_array[chunk_len[i]:chunk_len[i+1]] = afd_sum + tmp_rgb_map_array[chunk_len[i]:chunk_len[i+1], :] = rgb_map + tmp_gfa_map_array[chunk_len[i]:chunk_len[i+1]] = gfa_map + tmp_qa_map_array[chunk_len[i]:chunk_len[i+1], :] = qa_map + + nufo_map_array[mask] = tmp_nufo_map_array + afd_max_array[mask] = tmp_afd_max_array + afd_sum_array[mask] = tmp_afd_sum_array + rgb_map_array[mask] = tmp_rgb_map_array + gfa_map_array[mask] = tmp_gfa_map_array + qa_map_array[mask] = tmp_qa_map_array + + rgb_map_array /= all_time_max_odf + rgb_map_array *= 255 + qa_map_array /= all_time_global_max + + afd_unique = np.unique(afd_max_array) + if np.array_equal(np.array([0, 1]), afd_unique) \ + or np.array_equal(np.array([1]), afd_unique): + logging.warning('All AFD_max values are 1. The peaks seem normalized.') + + return(nufo_map_array, afd_max_array, afd_sum_array, + rgb_map_array, gfa_map_array, qa_map_array) + + +def _convert_sh_basis_parallel(args): + sh = args[0] + B_in = args[1] + invB_out = args[2] + chunk_id = args[3] + + for idx in range(sh.shape[0]): + if sh[idx].any(): + sf = np.dot(sh[idx], B_in) + sh[idx] = np.dot(sf, invB_out) + + return chunk_id, sh + + +def convert_sh_basis(shm_coeff, sphere, mask=None, + input_basis='descoteaux07', nbr_processes=None, + is_input_legacy=True, is_output_legacy=True): + """Converts spherical harmonic coefficients between two bases + + Parameters + ---------- + shm_coeff : np.ndarray + Spherical harmonic coefficients + sphere : Sphere + The Sphere providing discrete directions for evaluation. + mask : np.ndarray, optional + If `mask` is provided, only the data inside the mask will be + used for computations. + input_basis : str, optional + Type of spherical harmonic basis used for `shm_coeff`. Either + `descoteaux07` or `tournier07`. + Default: `descoteaux07` + nbr_processes: int, optional + The number of subprocesses to use. + Default: multiprocessing.cpu_count() + is_input_legacy: bool, optional + If true, this means that the input SH used a legacy basis definition + for backward compatibility with previous ``tournier07`` and + ``descoteaux07`` implementations. + Default: True + is_output_legacy: bool, optional + If true, this means that the output SH will use a legacy basis + definition for backward compatibility with previous ``tournier07`` and + ``descoteaux07`` implementations. 
+ Default: True + + Returns + ------- + shm_coeff_array : np.ndarray + Spherical harmonic coefficients in the desired basis. + """ + output_basis = 'descoteaux07' \ + if input_basis == 'tournier07' \ + else 'tournier07' + + sh_order = order_from_ncoef(shm_coeff.shape[-1]) + B_in, _ = sh_to_sf_matrix(sphere, sh_order, input_basis, + legacy=is_input_legacy) + _, invB_out = sh_to_sf_matrix(sphere, sh_order, output_basis, + legacy=is_output_legacy) + + data_shape = shm_coeff.shape + if mask is None: + mask = np.sum(shm_coeff, axis=3).astype(bool) + + nbr_processes = multiprocessing.cpu_count() \ + if nbr_processes is None or nbr_processes < 0 \ + else nbr_processes + + # Ravel the first 3 dimensions while keeping the 4th intact, like a list of + # 1D time series voxels. Then separate it in chunks of len(nbr_processes). + shm_coeff = shm_coeff[mask].reshape( + (np.count_nonzero(mask), data_shape[3])) + shm_coeff_chunks = np.array_split(shm_coeff, nbr_processes) + chunk_len = np.cumsum([0] + [len(c) for c in shm_coeff_chunks]) + + pool = multiprocessing.Pool(nbr_processes) + results = pool.map(_convert_sh_basis_parallel, + zip(shm_coeff_chunks, + itertools.repeat(B_in), + itertools.repeat(invB_out), + np.arange(len(shm_coeff_chunks)))) + pool.close() + pool.join() + + # Re-assemble the chunk together in the original shape. + shm_coeff_array = np.zeros(data_shape) + tmp_shm_coeff_array = np.zeros((np.count_nonzero(mask), data_shape[3])) + for i, new_shm_coeff in results: + tmp_shm_coeff_array[chunk_len[i]:chunk_len[i+1], :] = new_shm_coeff + + shm_coeff_array[mask] = tmp_shm_coeff_array + + return shm_coeff_array + + +def _convert_sh_to_sf_parallel(args): + sh = args[0] + B_in = args[1] + new_output_dim = args[2] + chunk_id = args[3] + sf = np.zeros((sh.shape[0], new_output_dim), dtype=np.float32) + + for idx in range(sh.shape[0]): + if sh[idx].any(): + sf[idx] = np.dot(sh[idx], B_in) + + return chunk_id, sf + + +def convert_sh_to_sf(shm_coeff, sphere, mask=None, dtype="float32", + input_basis='descoteaux07', input_full_basis=False, + nbr_processes=multiprocessing.cpu_count()): + """Converts spherical harmonic coefficients to an SF sphere + + Parameters + ---------- + shm_coeff : np.ndarray + Spherical harmonic coefficients + sphere : Sphere + The Sphere providing discrete directions for evaluation. + mask : np.ndarray, optional + If `mask` is provided, only the data inside the mask will be + used for computations. + dtype : str + Datatype to use for computation and output array. + Either `float32` or `float64`. Default: `float32` + input_basis : str, optional + Type of spherical harmonic basis used for `shm_coeff`. Either + `descoteaux07` or `tournier07`. + Default: `descoteaux07` + input_full_basis : bool + If True, use a full SH basis (even and odd orders) for the input SH + coefficients. + nbr_processes: int, optional + The number of subprocesses to use. + Default: multiprocessing.cpu_count() + + Returns + ------- + shm_coeff_array : np.ndarray + Spherical harmonic coefficients in the desired basis. + """ + assert dtype in ["float32", "float64"], "Only `float32` and `float64` " \ + "should be used." 
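+    # float32 halves the memory of the dense SF output relative to float64;
+    # B_in is cast to the same dtype below so the per-voxel dot products are
+    # carried out in the requested precision.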
+ + sh_order = order_from_ncoef(shm_coeff.shape[-1], + full_basis=input_full_basis) + B_in, _ = sh_to_sf_matrix(sphere, sh_order, basis_type=input_basis, + full_basis=input_full_basis) + B_in = B_in.astype(dtype) + + data_shape = shm_coeff.shape + if mask is None: + mask = np.sum(shm_coeff, axis=3).astype(bool) + + # Ravel the first 3 dimensions while keeping the 4th intact, like a list of + # 1D time series voxels. Then separate it in chunks of len(nbr_processes). + shm_coeff = shm_coeff[mask].reshape( + (np.count_nonzero(mask), data_shape[3])) + shm_coeff_chunks = np.array_split(shm_coeff, nbr_processes) + chunk_len = np.cumsum([0] + [len(c) for c in shm_coeff_chunks]) + + pool = multiprocessing.Pool(nbr_processes) + results = pool.map(_convert_sh_to_sf_parallel, + zip(shm_coeff_chunks, + itertools.repeat(B_in), + itertools.repeat(len(sphere.vertices)), + np.arange(len(shm_coeff_chunks)))) + pool.close() + pool.join() + + # Re-assemble the chunk together in the original shape. + new_shape = data_shape[:3] + (len(sphere.vertices),) + sf_array = np.zeros(new_shape, dtype=dtype) + tmp_sf_array = np.zeros((np.count_nonzero(mask), new_shape[3]), + dtype=dtype) + for i, new_sf in results: + tmp_sf_array[chunk_len[i]:chunk_len[i + 1], :] = new_sf + + sf_array[mask] = tmp_sf_array + + return sf_array From 9f20f4a8b004805b125473bf650255c96c6e0826 Mon Sep 17 00:00:00 2001 From: karp2601 Date: Mon, 13 Nov 2023 10:53:17 -0500 Subject: [PATCH 02/10] Adjusting scripts and modules doc. --- docs/source/modules/scilpy.reconst.rst | 58 ++++++++++++++++++++++-- scilpy/dwi/operations.py | 1 + scripts/scil_compute_asym_odf_metrics.py | 2 +- scripts/scil_compute_divide.py | 3 +- scripts/scil_compute_fodf_metrics.py | 2 +- scripts/scil_compute_memsmt_fodf.py | 3 +- scripts/scil_compute_msmt_fodf.py | 8 ++-- scripts/scil_compute_sf_from_sh.py | 2 +- scripts/scil_compute_sh_from_signal.py | 2 +- scripts/scil_compute_ssst_fodf.py | 8 ++-- scripts/scil_convert_sh_basis.py | 2 +- 11 files changed, 72 insertions(+), 19 deletions(-) diff --git a/docs/source/modules/scilpy.reconst.rst b/docs/source/modules/scilpy.reconst.rst index 19ac1cb9e..40c0b66d0 100644 --- a/docs/source/modules/scilpy.reconst.rst +++ b/docs/source/modules/scilpy.reconst.rst @@ -1,10 +1,50 @@ scilpy.reconst package ====================== -scilpy.reconst.afd\_along\_streamlines module +scilpy.reconst.aodf module --------------------------------------------- -.. automodule:: scilpy.reconst.afd_along_streamlines +.. automodule:: scilpy.reconst.aodf + :members: + :undoc-members: + :show-inheritance: + +scilpy.reconst.bingham module +--------------------------------------------- + +.. automodule:: scilpy.reconst.bingham + :members: + :undoc-members: + :show-inheritance: + +scilpy.reconst.divide module +--------------------------------------------- + +.. automodule:: scilpy.reconst.divide + :members: + :undoc-members: + :show-inheritance: + +scilpy.reconst.dti module +--------------------------------------------- + +.. automodule:: scilpy.reconst.dti + :members: + :undoc-members: + :show-inheritance: + +scilpy.reconst.dwi module +--------------------------------- + +.. automodule:: scilpy.reconst.dwi + :members: + :undoc-members: + :show-inheritance: + +scilpy.reconst.fiber\_coherence module +--------------------------------------------- + +.. 
automodule:: scilpy.reconst.fiber_coherence :members: :undoc-members: :show-inheritance: @@ -25,10 +65,18 @@ scilpy.reconst.frf module :undoc-members: :show-inheritance: -scilpy.reconst.raw\_signal module ---------------------------------- +scilpy.reconst.mti module +--------------------------------------------- + +.. automodule:: scilpy.reconst.mti + :members: + :undoc-members: + :show-inheritance: + +scilpy.reconst.sh module +--------------------------------------------- -.. automodule:: scilpy.reconst.raw_signal +.. automodule:: scilpy.reconst.sh :members: :undoc-members: :show-inheritance: diff --git a/scilpy/dwi/operations.py b/scilpy/dwi/operations.py index 3e8a5001f..230b320eb 100644 --- a/scilpy/dwi/operations.py +++ b/scilpy/dwi/operations.py @@ -1,3 +1,4 @@ +import logging import numpy as np diff --git a/scripts/scil_compute_asym_odf_metrics.py b/scripts/scil_compute_asym_odf_metrics.py index 40e5661e6..fd3b72e3e 100755 --- a/scripts/scil_compute_asym_odf_metrics.py +++ b/scripts/scil_compute_asym_odf_metrics.py @@ -32,7 +32,7 @@ from dipy.data import get_sphere, SPHERE_FILES from dipy.direction.peaks import reshape_peaks_for_visualization -from scilpy.reconst.multi_processes import peaks_from_sh +from scilpy.reconst.sh import peaks_from_sh from scilpy.reconst.utils import get_sh_order_and_fullness from scilpy.reconst.aodf import (compute_asymmetry_index, compute_odd_power_map) diff --git a/scripts/scil_compute_divide.py b/scripts/scil_compute_divide.py index 0ef609a66..2658cf748 100755 --- a/scripts/scil_compute_divide.py +++ b/scripts/scil_compute_divide.py @@ -37,8 +37,7 @@ from scilpy.io.utils import (add_overwrite_arg, assert_inputs_exist, assert_outputs_exist, add_force_b0_arg, add_processes_arg, add_verbose_arg) -from scilpy.reconst.multi_processes import fit_gamma -from scilpy.reconst.divide_fit import gamma_fit2metrics +from scilpy.reconst.divide import fit_gamma, gamma_fit2metrics from scilpy.reconst.b_tensor_utils import generate_btensor_input diff --git a/scripts/scil_compute_fodf_metrics.py b/scripts/scil_compute_fodf_metrics.py index f597b72a6..d96b88863 100755 --- a/scripts/scil_compute_fodf_metrics.py +++ b/scripts/scil_compute_fodf_metrics.py @@ -42,7 +42,7 @@ from scilpy.io.utils import (add_overwrite_arg, add_sh_basis_args, add_processes_arg, assert_inputs_exist, assert_outputs_exist) -from scilpy.reconst.multi_processes import peaks_from_sh, maps_from_sh +from scilpy.reconst.sh import peaks_from_sh, maps_from_sh def _build_arg_parser(): diff --git a/scripts/scil_compute_memsmt_fodf.py b/scripts/scil_compute_memsmt_fodf.py index fc0ef0928..8b21a0ec8 100755 --- a/scripts/scil_compute_memsmt_fodf.py +++ b/scripts/scil_compute_memsmt_fodf.py @@ -47,7 +47,8 @@ assert_outputs_exist, add_force_b0_arg, add_sh_basis_args, add_processes_arg, add_verbose_arg) -from scilpy.reconst.multi_processes import fit_from_model, convert_sh_basis +from scilpy.reconst.fodf import fit_from_model +from scilpy.reconst.sh import convert_sh_basis from scilpy.reconst.b_tensor_utils import generate_btensor_input diff --git a/scripts/scil_compute_msmt_fodf.py b/scripts/scil_compute_msmt_fodf.py index 5934e4a42..d88642c45 100755 --- a/scripts/scil_compute_msmt_fodf.py +++ b/scripts/scil_compute_msmt_fodf.py @@ -27,14 +27,16 @@ import nibabel as nib import numpy as np +from scilpy.gradients.bvec_bval_tools import (check_b0_threshold, + normalize_bvecs, + is_normalized_bvecs) from scilpy.io.image import get_data_as_mask from scilpy.io.utils import (add_overwrite_arg, assert_inputs_exist, 
assert_outputs_exist, add_force_b0_arg, add_sh_basis_args, add_processes_arg, add_verbose_arg) -from scilpy.reconst.multi_processes import fit_from_model, convert_sh_basis -from scilpy.gradients.bvec_bval_tools import (check_b0_threshold, normalize_bvecs, - is_normalized_bvecs) +from scilpy.reconst.fodf import fit_from_model +from scilpy.reconst.sh import convert_sh_basis def _build_arg_parser(): diff --git a/scripts/scil_compute_sf_from_sh.py b/scripts/scil_compute_sf_from_sh.py index 300e20463..368c06a44 100755 --- a/scripts/scil_compute_sf_from_sh.py +++ b/scripts/scil_compute_sf_from_sh.py @@ -23,7 +23,7 @@ add_processes_arg, add_sh_basis_args, assert_inputs_exist, assert_outputs_exist, validate_nbr_processes) -from scilpy.reconst.multi_processes import convert_sh_to_sf +from scilpy.reconst.sh import convert_sh_to_sf from scilpy.gradients.bvec_bval_tools import (check_b0_threshold) diff --git a/scripts/scil_compute_sh_from_signal.py b/scripts/scil_compute_sh_from_signal.py index e21ae4ca7..0d79d295a 100755 --- a/scripts/scil_compute_sh_from_signal.py +++ b/scripts/scil_compute_sh_from_signal.py @@ -16,7 +16,7 @@ from scilpy.io.utils import (add_force_b0_arg, add_overwrite_arg, add_sh_basis_args, assert_inputs_exist, assert_outputs_exist) -from scilpy.reconst.raw_signal import compute_sh_coefficients +from scilpy.reconst.dwi import compute_sh_coefficients def _build_arg_parser(): diff --git a/scripts/scil_compute_ssst_fodf.py b/scripts/scil_compute_ssst_fodf.py index 15bbb9735..d87a109b6 100755 --- a/scripts/scil_compute_ssst_fodf.py +++ b/scripts/scil_compute_ssst_fodf.py @@ -17,14 +17,16 @@ import nibabel as nib import numpy as np +from scilpy.gradients.bvec_bval_tools import (check_b0_threshold, + normalize_bvecs, + is_normalized_bvecs) from scilpy.io.image import get_data_as_mask from scilpy.io.utils import (add_overwrite_arg, assert_inputs_exist, add_verbose_arg, assert_outputs_exist, add_force_b0_arg, add_sh_basis_args, add_processes_arg) -from scilpy.reconst.multi_processes import fit_from_model, convert_sh_basis -from scilpy.gradients.bvec_bval_tools import (check_b0_threshold, normalize_bvecs, - is_normalized_bvecs) +from scilpy.reconst.fodf import fit_from_model +from scilpy.reconst.sh import convert_sh_basis def _build_arg_parser(): diff --git a/scripts/scil_convert_sh_basis.py b/scripts/scil_convert_sh_basis.py index b1412e935..674eed8c3 100755 --- a/scripts/scil_convert_sh_basis.py +++ b/scripts/scil_convert_sh_basis.py @@ -15,7 +15,7 @@ import nibabel as nib import numpy as np -from scilpy.reconst.multi_processes import convert_sh_basis +from scilpy.reconst.sh import convert_sh_basis from scilpy.io.utils import (add_overwrite_arg, add_sh_basis_args, add_processes_arg, assert_inputs_exist, assert_outputs_exist) From addae0069d4163edb2f3072d611ad5e3008e0df6 Mon Sep 17 00:00:00 2001 From: karp2601 Date: Mon, 13 Nov 2023 10:55:00 -0500 Subject: [PATCH 03/10] Moving streamline stuff to tractanalysis module --- scilpy/reconst/afd_along_streamlines.py | 134 ------------------ .../reconst/lobe_metrics_along_streamlines.py | 117 --------------- 2 files changed, 251 deletions(-) delete mode 100644 scilpy/reconst/afd_along_streamlines.py delete mode 100644 scilpy/reconst/lobe_metrics_along_streamlines.py diff --git a/scilpy/reconst/afd_along_streamlines.py b/scilpy/reconst/afd_along_streamlines.py deleted file mode 100644 index 9b9b32efa..000000000 --- a/scilpy/reconst/afd_along_streamlines.py +++ /dev/null @@ -1,134 +0,0 @@ -# -*- coding: utf-8 -*- - -from dipy.data import 
get_sphere -import numpy as np -from scipy.special import lpn - -from scilpy.reconst.utils import find_order_from_nb_coeff, get_b_matrix -from scilpy.tractanalysis.grid_intersections import grid_intersections - - -def afd_map_along_streamlines(sft, fodf, fodf_basis, length_weighting): - """ - Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF - (radfODF) maps along a bundle. - - Parameters - ---------- - sft : StatefulTractogram - StatefulTractogram containing the streamlines needed. - fodf : nibabel.image - fODF with shape (X, Y, Z, #coeffs) - coeffs depending on the sh_order - fodf_basis : string - Has to be descoteaux07 or tournier07 - length_weighting : bool - If set, will weigh the AFD values according to segment lengths - - Returns - ------- - afd_sum : np.array - AFD map (weighted if length_weighting) - rd_sum : np.array - rdAFD map (weighted if length_weighting) - """ - - - afd_sum, rd_sum, weights = \ - afd_and_rd_sums_along_streamlines(sft, fodf, fodf_basis, - length_weighting) - - non_zeros = np.nonzero(afd_sum) - weights_nz = weights[non_zeros] - afd_sum[non_zeros] /= weights_nz - rd_sum[non_zeros] /= weights_nz - - return afd_sum, rd_sum - - -def afd_and_rd_sums_along_streamlines(sft, fodf, fodf_basis, - length_weighting): - """ - Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF) - maps along a bundle. - - Parameters - ---------- - sft : StatefulTractogram - StatefulTractogram containing the streamlines needed. - fodf : nibabel.image - fODF with shape (X, Y, Z, #coeffs). - #coeffs depend on the sh_order. - fodf_basis : string - Has to be descoteaux07 or tournier07. - length_weighting : bool - If set, will weigh the AFD values according to segment lengths. - - Returns - ------- - afd_sum_map : np.array - AFD map. - rd_sum_map : np.array - fdAFD map. - weight_map : np.array - Segment lengths. - """ - - sft.to_vox() - sft.to_corner() - - fodf_data = fodf.get_fdata(dtype=np.float32) - order = find_order_from_nb_coeff(fodf_data) - sphere = get_sphere('repulsion724') - b_matrix, _, n = get_b_matrix(order, sphere, fodf_basis, return_all=True) - legendre0_at_n = lpn(order, 0)[0][n] - sphere_norm = np.linalg.norm(sphere.vertices) - - afd_sum_map = np.zeros(shape=fodf_data.shape[:-1]) - rd_sum_map = np.zeros(shape=fodf_data.shape[:-1]) - weight_map = np.zeros(shape=fodf_data.shape[:-1]) - - p_matrix = np.eye(fodf_data.shape[3]) * legendre0_at_n - all_crossed_indices = grid_intersections(sft.streamlines) - for crossed_indices in all_crossed_indices: - segments = crossed_indices[1:] - crossed_indices[:-1] - seg_lengths = np.linalg.norm(segments, axis=1) - - # Remove points where the segment is zero. - # This removes numpy warnings of division by zero. 
- non_zero_lengths = np.nonzero(seg_lengths)[0] - segments = segments[non_zero_lengths] - seg_lengths = seg_lengths[non_zero_lengths] - - test = np.dot(segments, sphere.vertices.T) - test2 = (test.T / (seg_lengths * sphere_norm)).T - angles = np.arccos(test2) - sorted_angles = np.argsort(angles, axis=1) - closest_vertex_indices = sorted_angles[:, 0] - - # Those starting points are used for the segment vox_idx computations - strl_start = crossed_indices[non_zero_lengths] - vox_indices = (strl_start + (0.5 * segments)).astype(int) - - normalization_weights = np.ones_like(seg_lengths) - if length_weighting: - normalization_weights = seg_lengths / np.linalg.norm(fodf.header.get_zooms()[:3]) - - for vox_idx, closest_vertex_index, norm_weight in zip(vox_indices, - closest_vertex_indices, - normalization_weights): - vox_idx = tuple(vox_idx) - b_at_idx = b_matrix[closest_vertex_index] - fodf_at_index = fodf_data[vox_idx] - - afd_val = np.dot(b_at_idx, fodf_at_index) - rd_val = np.dot(np.dot(b_at_idx.T, p_matrix), - fodf_at_index) - - afd_sum_map[vox_idx] += afd_val * norm_weight - rd_sum_map[vox_idx] += rd_val * norm_weight - weight_map[vox_idx] += norm_weight - - rd_sum_map[rd_sum_map < 0.] = 0. - - return afd_sum_map, rd_sum_map, weight_map diff --git a/scilpy/reconst/lobe_metrics_along_streamlines.py b/scilpy/reconst/lobe_metrics_along_streamlines.py deleted file mode 100644 index 7429c377b..000000000 --- a/scilpy/reconst/lobe_metrics_along_streamlines.py +++ /dev/null @@ -1,117 +0,0 @@ -# -*- coding: utf-8 -*- - -from dipy.io.stateful_tractogram import StatefulTractogram -import numpy as np -from scilpy.reconst.bingham import bingham_to_peak_direction -from scilpy.tractanalysis.grid_intersections import grid_intersections - - -def lobe_specific_metric_map_along_streamlines(sft, bingham_coeffs, - metric, max_theta, - length_weighting): - """ - Compute mean map for a given lobe-specific metric along streamlines. - - Parameters - ---------- - sft : StatefulTractogram - StatefulTractogram containing the streamlines needed. - bingham_coeffs : ndarray - Array of shape (X, Y, Z, N_LOBES, NB_PARAMS) containing - the Bingham distributions parameters. - metric : ndarray - Array of shape (X, Y, Z) containing the lobe-specific - metric of interest. - max_theta : float - Maximum angle in degrees between the fiber direction and the - Bingham peak direction. - length_weighting : bool - If True, will weigh the metric values according to segment lengths. - """ - - fd_sum, weights = \ - lobe_metric_sum_along_streamlines(sft, bingham_coeffs, - metric, max_theta, - length_weighting) - - non_zeros = np.nonzero(fd_sum) - weights_nz = weights[non_zeros] - fd_sum[non_zeros] /= weights_nz - - return fd_sum - - -def lobe_metric_sum_along_streamlines(sft, bingham_coeffs, metric, - max_theta, length_weighting): - """ - Compute a sum map along a bundle for a given lobe-specific metric. - - Parameters - ---------- - sft : StatefulTractogram - StatefulTractogram containing the streamlines needed. - bingham_coeffs : ndarray (X, Y, Z, N_LOBES, NB_PARAMS) - Bingham distributions parameters volume. - metric : ndarray (X, Y, Z) - The lobe-specific metric of interest. - max_theta : float - Maximum angle in degrees between the fiber direction and the - Bingham peak direction. - length_weighting : bool - If True, will weight the metric values according to segment lengths. - - Returns - ------- - metric_sum_map : np.array - Lobe-specific metric sum map. - weight_map : np.array - Segment lengths. 
- """ - - sft.to_vox() - sft.to_corner() - - metric_sum_map = np.zeros(metric.shape[:-1]) - weight_map = np.zeros(metric.shape[:-1]) - min_cos_theta = np.cos(np.radians(max_theta)) - - all_crossed_indices = grid_intersections(sft.streamlines) - for crossed_indices in all_crossed_indices: - segments = crossed_indices[1:] - crossed_indices[:-1] - seg_lengths = np.linalg.norm(segments, axis=1) - - # Remove points where the segment is zero. - # This removes numpy warnings of division by zero. - non_zero_lengths = np.nonzero(seg_lengths)[0] - segments = segments[non_zero_lengths] - seg_lengths = seg_lengths[non_zero_lengths] - - # Those starting points are used for the segment vox_idx computations - seg_start = crossed_indices[non_zero_lengths] - vox_indices = (seg_start + (0.5 * segments)).astype(int) - - normalization_weights = np.ones_like(seg_lengths) - if length_weighting: - normalization_weights = seg_lengths - - normalized_seg = np.reshape(segments / seg_lengths[..., None], (-1, 3)) - - for vox_idx, seg_dir, norm_weight in zip(vox_indices, - normalized_seg, - normalization_weights): - vox_idx = tuple(vox_idx) - bingham_at_idx = bingham_coeffs[vox_idx] # (5, N_PARAMS) - - bingham_peak_dir = bingham_to_peak_direction(bingham_at_idx) - cos_theta = np.abs(np.dot(seg_dir.reshape((-1, 3)), - bingham_peak_dir.T)) - - metric_val = 0.0 - if (cos_theta > min_cos_theta).any(): - lobe_idx = np.argmax(np.squeeze(cos_theta), axis=0) # (n_segs) - metric_val = metric[vox_idx][lobe_idx] - - metric_sum_map[vox_idx] += metric_val * norm_weight - weight_map[vox_idx] += norm_weight - - return metric_sum_map, weight_map From 5c408f419e06be76a0aad15ce7352bbbf03ba893 Mon Sep 17 00:00:00 2001 From: karp2601 Date: Mon, 13 Nov 2023 10:55:19 -0500 Subject: [PATCH 04/10] Moving streamline stuff to tractanalysis module v2 --- scilpy/tractanalysis/afd_along_streamlines.py | 134 ++++++++++++++++++ .../lobe_metrics_along_streamlines.py | 117 +++++++++++++++ 2 files changed, 251 insertions(+) create mode 100644 scilpy/tractanalysis/afd_along_streamlines.py create mode 100644 scilpy/tractanalysis/lobe_metrics_along_streamlines.py diff --git a/scilpy/tractanalysis/afd_along_streamlines.py b/scilpy/tractanalysis/afd_along_streamlines.py new file mode 100644 index 000000000..9b9b32efa --- /dev/null +++ b/scilpy/tractanalysis/afd_along_streamlines.py @@ -0,0 +1,134 @@ +# -*- coding: utf-8 -*- + +from dipy.data import get_sphere +import numpy as np +from scipy.special import lpn + +from scilpy.reconst.utils import find_order_from_nb_coeff, get_b_matrix +from scilpy.tractanalysis.grid_intersections import grid_intersections + + +def afd_map_along_streamlines(sft, fodf, fodf_basis, length_weighting): + """ + Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF + (radfODF) maps along a bundle. + + Parameters + ---------- + sft : StatefulTractogram + StatefulTractogram containing the streamlines needed. 
+    fodf : nibabel.image
+        fODF with shape (X, Y, Z, #coeffs)
+        coeffs depending on the sh_order
+    fodf_basis : string
+        Has to be descoteaux07 or tournier07
+    length_weighting : bool
+        If set, will weigh the AFD values according to segment lengths
+
+    Returns
+    -------
+    afd_sum : np.array
+        AFD map (weighted if length_weighting)
+    rd_sum : np.array
+        rdAFD map (weighted if length_weighting)
+    """
+
+
+    afd_sum, rd_sum, weights = \
+        afd_and_rd_sums_along_streamlines(sft, fodf, fodf_basis,
+                                          length_weighting)
+
+    non_zeros = np.nonzero(afd_sum)
+    weights_nz = weights[non_zeros]
+    afd_sum[non_zeros] /= weights_nz
+    rd_sum[non_zeros] /= weights_nz
+
+    return afd_sum, rd_sum
+
+
+def afd_and_rd_sums_along_streamlines(sft, fodf, fodf_basis,
+                                      length_weighting):
+    """
+    Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)
+    maps along a bundle.
+
+    Parameters
+    ----------
+    sft : StatefulTractogram
+        StatefulTractogram containing the streamlines needed.
+    fodf : nibabel.image
+        fODF with shape (X, Y, Z, #coeffs).
+        #coeffs depend on the sh_order.
+    fodf_basis : string
+        Has to be descoteaux07 or tournier07.
+    length_weighting : bool
+        If set, will weigh the AFD values according to segment lengths.
+
+    Returns
+    -------
+    afd_sum_map : np.array
+        AFD map.
+    rd_sum_map : np.array
+        rdAFD map.
+    weight_map : np.array
+        Segment lengths.
+    """
+
+    sft.to_vox()
+    sft.to_corner()
+
+    fodf_data = fodf.get_fdata(dtype=np.float32)
+    order = find_order_from_nb_coeff(fodf_data)
+    sphere = get_sphere('repulsion724')
+    b_matrix, _, n = get_b_matrix(order, sphere, fodf_basis, return_all=True)
+    legendre0_at_n = lpn(order, 0)[0][n]
+    sphere_norm = np.linalg.norm(sphere.vertices)
+
+    afd_sum_map = np.zeros(shape=fodf_data.shape[:-1])
+    rd_sum_map = np.zeros(shape=fodf_data.shape[:-1])
+    weight_map = np.zeros(shape=fodf_data.shape[:-1])
+
+    p_matrix = np.eye(fodf_data.shape[3]) * legendre0_at_n
+    all_crossed_indices = grid_intersections(sft.streamlines)
+    for crossed_indices in all_crossed_indices:
+        segments = crossed_indices[1:] - crossed_indices[:-1]
+        seg_lengths = np.linalg.norm(segments, axis=1)
+
+        # Remove points where the segment is zero.
+        # This removes numpy warnings of division by zero.
+        non_zero_lengths = np.nonzero(seg_lengths)[0]
+        segments = segments[non_zero_lengths]
+        seg_lengths = seg_lengths[non_zero_lengths]
+
+        test = np.dot(segments, sphere.vertices.T)
+        test2 = (test.T / (seg_lengths * sphere_norm)).T
+        angles = np.arccos(test2)
+        sorted_angles = np.argsort(angles, axis=1)
+        closest_vertex_indices = sorted_angles[:, 0]
+
+        # Those starting points are used for the segment vox_idx computations
+        strl_start = crossed_indices[non_zero_lengths]
+        vox_indices = (strl_start + (0.5 * segments)).astype(int)
+
+        normalization_weights = np.ones_like(seg_lengths)
+        if length_weighting:
+            normalization_weights = seg_lengths / np.linalg.norm(fodf.header.get_zooms()[:3])
+
+        for vox_idx, closest_vertex_index, norm_weight in zip(vox_indices,
+                                                              closest_vertex_indices,
+                                                              normalization_weights):
+            vox_idx = tuple(vox_idx)
+            b_at_idx = b_matrix[closest_vertex_index]
+            fodf_at_index = fodf_data[vox_idx]
+
+            afd_val = np.dot(b_at_idx, fodf_at_index)
+            rd_val = np.dot(np.dot(b_at_idx.T, p_matrix),
+                            fodf_at_index)
+
+            afd_sum_map[vox_idx] += afd_val * norm_weight
+            rd_sum_map[vox_idx] += rd_val * norm_weight
+            weight_map[vox_idx] += norm_weight
+
+    rd_sum_map[rd_sum_map < 0.] = 0.
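+    # Slightly negative radial fODF sums can occur with a truncated SH
+    # series; the line above clips them to zero.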
+
+    return afd_sum_map, rd_sum_map, weight_map
diff --git a/scilpy/tractanalysis/lobe_metrics_along_streamlines.py b/scilpy/tractanalysis/lobe_metrics_along_streamlines.py
new file mode 100644
index 000000000..7429c377b
--- /dev/null
+++ b/scilpy/tractanalysis/lobe_metrics_along_streamlines.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+
+from dipy.io.stateful_tractogram import StatefulTractogram
+import numpy as np
+from scilpy.reconst.bingham import bingham_to_peak_direction
+from scilpy.tractanalysis.grid_intersections import grid_intersections
+
+
+def lobe_specific_metric_map_along_streamlines(sft, bingham_coeffs,
+                                               metric, max_theta,
+                                               length_weighting):
+    """
+    Compute mean map for a given lobe-specific metric along streamlines.
+
+    Parameters
+    ----------
+    sft : StatefulTractogram
+        StatefulTractogram containing the streamlines needed.
+    bingham_coeffs : ndarray
+        Array of shape (X, Y, Z, N_LOBES, NB_PARAMS) containing
+        the Bingham distributions parameters.
+    metric : ndarray
+        Array of shape (X, Y, Z, N_LOBES) containing the lobe-specific
+        metric of interest.
+    max_theta : float
+        Maximum angle in degrees between the fiber direction and the
+        Bingham peak direction.
+    length_weighting : bool
+        If True, will weigh the metric values according to segment lengths.
+    """
+
+    fd_sum, weights = \
+        lobe_metric_sum_along_streamlines(sft, bingham_coeffs,
+                                          metric, max_theta,
+                                          length_weighting)
+
+    non_zeros = np.nonzero(fd_sum)
+    weights_nz = weights[non_zeros]
+    fd_sum[non_zeros] /= weights_nz
+
+    return fd_sum
+
+
+def lobe_metric_sum_along_streamlines(sft, bingham_coeffs, metric,
+                                      max_theta, length_weighting):
+    """
+    Compute a sum map along a bundle for a given lobe-specific metric.
+
+    Parameters
+    ----------
+    sft : StatefulTractogram
+        StatefulTractogram containing the streamlines needed.
+    bingham_coeffs : ndarray (X, Y, Z, N_LOBES, NB_PARAMS)
+        Bingham distributions parameters volume.
+    metric : ndarray (X, Y, Z, N_LOBES)
+        The lobe-specific metric of interest.
+    max_theta : float
+        Maximum angle in degrees between the fiber direction and the
+        Bingham peak direction.
+    length_weighting : bool
+        If True, will weight the metric values according to segment lengths.
+
+    Returns
+    -------
+    metric_sum_map : np.array
+        Lobe-specific metric sum map.
+    weight_map : np.array
+        Segment lengths.
+    """
+
+    sft.to_vox()
+    sft.to_corner()
+
+    metric_sum_map = np.zeros(metric.shape[:-1])
+    weight_map = np.zeros(metric.shape[:-1])
+    min_cos_theta = np.cos(np.radians(max_theta))
+
+    all_crossed_indices = grid_intersections(sft.streamlines)
+    for crossed_indices in all_crossed_indices:
+        segments = crossed_indices[1:] - crossed_indices[:-1]
+        seg_lengths = np.linalg.norm(segments, axis=1)
+
+        # Remove points where the segment is zero.
+        # This removes numpy warnings of division by zero.
+ non_zero_lengths = np.nonzero(seg_lengths)[0] + segments = segments[non_zero_lengths] + seg_lengths = seg_lengths[non_zero_lengths] + + # Those starting points are used for the segment vox_idx computations + seg_start = crossed_indices[non_zero_lengths] + vox_indices = (seg_start + (0.5 * segments)).astype(int) + + normalization_weights = np.ones_like(seg_lengths) + if length_weighting: + normalization_weights = seg_lengths + + normalized_seg = np.reshape(segments / seg_lengths[..., None], (-1, 3)) + + for vox_idx, seg_dir, norm_weight in zip(vox_indices, + normalized_seg, + normalization_weights): + vox_idx = tuple(vox_idx) + bingham_at_idx = bingham_coeffs[vox_idx] # (5, N_PARAMS) + + bingham_peak_dir = bingham_to_peak_direction(bingham_at_idx) + cos_theta = np.abs(np.dot(seg_dir.reshape((-1, 3)), + bingham_peak_dir.T)) + + metric_val = 0.0 + if (cos_theta > min_cos_theta).any(): + lobe_idx = np.argmax(np.squeeze(cos_theta), axis=0) # (n_segs) + metric_val = metric[vox_idx][lobe_idx] + + metric_sum_map[vox_idx] += metric_val * norm_weight + weight_map[vox_idx] += norm_weight + + return metric_sum_map, weight_map From dfd8b3ef88079627cb79cbc4017fed21855a83d4 Mon Sep 17 00:00:00 2001 From: karp2601 Date: Mon, 13 Nov 2023 10:59:19 -0500 Subject: [PATCH 05/10] Adjusting scripts --- scripts/scil_apply_bias_field_on_dwi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/scil_apply_bias_field_on_dwi.py b/scripts/scil_apply_bias_field_on_dwi.py index 5548b8e08..2b8cc8c93 100755 --- a/scripts/scil_apply_bias_field_on_dwi.py +++ b/scripts/scil_apply_bias_field_on_dwi.py @@ -16,7 +16,7 @@ from scilpy.io.utils import (add_overwrite_arg, assert_inputs_exist, assert_outputs_exist) -from scilpy.dwi.utils import rescale_dwi +from scilpy.dwi.operations import rescale_dwi def _build_arg_parser(): From 3214c3a7383804615af3cede1de3baccbe05b785 Mon Sep 17 00:00:00 2001 From: karp2601 Date: Mon, 13 Nov 2023 11:03:35 -0500 Subject: [PATCH 06/10] Moving reconst.b_tensor_utils to io.b_tensor --- scilpy/{reconst/b_tensor_utils.py => io/b_tensor.py} | 0 scilpy/reconst/divide.py | 6 +++--- scripts/scil_compute_divide.py | 2 +- scripts/scil_compute_memsmt_fodf.py | 2 +- scripts/scil_compute_memsmt_frf.py | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) rename scilpy/{reconst/b_tensor_utils.py => io/b_tensor.py} (100%) diff --git a/scilpy/reconst/b_tensor_utils.py b/scilpy/io/b_tensor.py similarity index 100% rename from scilpy/reconst/b_tensor_utils.py rename to scilpy/io/b_tensor.py diff --git a/scilpy/reconst/divide.py b/scilpy/reconst/divide.py index d834a1a3e..855ac5ca5 100644 --- a/scilpy/reconst/divide.py +++ b/scilpy/reconst/divide.py @@ -41,7 +41,7 @@ def _random_p0(signal, gtab_infos, lb, ub, weight, n_iter): Contains information about the gtab, such as the unique bvals, the encoding types, the number of directions and the acquisition index. Obtained as output of the function - `reconst.b_tensor_utils.generate_btensor_input`. + `io.b_tensor.generate_btensor_input`. lb : np.ndarray of floats Lower boundaries of the fitting parameters. ub : np.ndarray of floats @@ -84,7 +84,7 @@ def gamma_data2fit(signal, gtab_infos, fit_iters=1, random_iters=50, Contains information about the gtab, such as the unique bvals, the encoding types, the number of directions and the acquisition index. Obtained as output of the function - `reconst.b_tensor_utils.generate_btensor_input`. + `io.b_tensor.generate_btensor_input`. fit_iters : int, optional Number of iterations in the gamma fit. 
Defaults to 1. random_iters : int, optional @@ -192,7 +192,7 @@ def gamma_fit2data(gtab_infos, params): Contains information about the gtab, such as the unique bvals, the encoding types, the number of directions and the acquisition index. Obtained as output of the function - `reconst.b_tensor_utils.generate_btensor_input`. + `io.b_tensor.generate_btensor_input`. params : np.array Array containing the parameters of the fit. diff --git a/scripts/scil_compute_divide.py b/scripts/scil_compute_divide.py index 2658cf748..ed4e58c69 100755 --- a/scripts/scil_compute_divide.py +++ b/scripts/scil_compute_divide.py @@ -33,12 +33,12 @@ import numpy as np from scilpy.image.utils import extract_affine +from scilpy.io.b_tensor import generate_btensor_input from scilpy.io.image import get_data_as_mask from scilpy.io.utils import (add_overwrite_arg, assert_inputs_exist, assert_outputs_exist, add_force_b0_arg, add_processes_arg, add_verbose_arg) from scilpy.reconst.divide import fit_gamma, gamma_fit2metrics -from scilpy.reconst.b_tensor_utils import generate_btensor_input def _build_arg_parser(): diff --git a/scripts/scil_compute_memsmt_fodf.py b/scripts/scil_compute_memsmt_fodf.py index 8b21a0ec8..b5ff4dbe6 100755 --- a/scripts/scil_compute_memsmt_fodf.py +++ b/scripts/scil_compute_memsmt_fodf.py @@ -42,6 +42,7 @@ import numpy as np from scilpy.image.utils import extract_affine +from scilpy.io.b_tensor import generate_btensor_input from scilpy.io.image import get_data_as_mask from scilpy.io.utils import (add_overwrite_arg, assert_inputs_exist, assert_outputs_exist, add_force_b0_arg, @@ -49,7 +50,6 @@ add_verbose_arg) from scilpy.reconst.fodf import fit_from_model from scilpy.reconst.sh import convert_sh_basis -from scilpy.reconst.b_tensor_utils import generate_btensor_input def _build_arg_parser(): diff --git a/scripts/scil_compute_memsmt_frf.py b/scripts/scil_compute_memsmt_frf.py index 5cf88ef71..0fd6b65bb 100755 --- a/scripts/scil_compute_memsmt_frf.py +++ b/scripts/scil_compute_memsmt_frf.py @@ -42,13 +42,13 @@ from scilpy.dwi.utils import extract_dwi_shell from scilpy.image.utils import extract_affine +from scilpy.io.b_tensor import generate_btensor_input from scilpy.io.image import get_data_as_mask from scilpy.io.utils import (add_force_b0_arg, add_overwrite_arg, add_verbose_arg, assert_inputs_exist, assert_outputs_exist, assert_roi_radii_format) from scilpy.reconst.frf import compute_msmt_frf -from scilpy.reconst.b_tensor_utils import generate_btensor_input def buildArgsParser(): From 561774969524f76602e5a93ee4c8f0a43ea3235c Mon Sep 17 00:00:00 2001 From: karp2601 Date: Mon, 13 Nov 2023 12:11:32 -0500 Subject: [PATCH 07/10] Fixing tractanalysis scripts import. 
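
For downstream code, only the import path changes; the moved functions keep
their names and signatures. A minimal sketch of the updated usage (file names
below are illustrative, not part of this patch):

    import nibabel as nib
    from dipy.io.streamline import load_tractogram

    from scilpy.tractanalysis.afd_along_streamlines \
        import afd_map_along_streamlines

    # Load a bundle and its fODF volume (SH coefficients).
    sft = load_tractogram('bundle.trk', 'fodf.nii.gz')
    fodf_img = nib.load('fodf.nii.gz')

    afd_map, rd_map = afd_map_along_streamlines(sft, fodf_img,
                                                'descoteaux07',
                                                length_weighting=True)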
---
 scripts/scil_compute_mean_fixel_afd_from_bundles.py         | 2 +-
 scripts/scil_compute_mean_fixel_afd_from_hdf5.py            | 2 +-
 scripts/scil_compute_mean_fixel_lobe_metric_from_bundles.py | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/scripts/scil_compute_mean_fixel_afd_from_bundles.py b/scripts/scil_compute_mean_fixel_afd_from_bundles.py
index 176e620c8..f7588b610 100755
--- a/scripts/scil_compute_mean_fixel_afd_from_bundles.py
+++ b/scripts/scil_compute_mean_fixel_afd_from_bundles.py
@@ -20,7 +20,7 @@
 from scilpy.io.utils import (add_overwrite_arg, add_sh_basis_args,
                              add_reference_arg, assert_inputs_exist,
                              assert_outputs_exist)
-from scilpy.reconst.afd_along_streamlines import afd_map_along_streamlines
+from scilpy.tractanalysis.afd_along_streamlines import afd_map_along_streamlines
 
 EPILOG = """
 Reference:
diff --git a/scripts/scil_compute_mean_fixel_afd_from_hdf5.py b/scripts/scil_compute_mean_fixel_afd_from_hdf5.py
index 6d26e3c0d..b66342149 100755
--- a/scripts/scil_compute_mean_fixel_afd_from_hdf5.py
+++ b/scripts/scil_compute_mean_fixel_afd_from_hdf5.py
@@ -30,7 +30,7 @@
                              assert_inputs_exist,
                              assert_outputs_exist,
                              validate_nbr_processes)
-from scilpy.reconst.afd_along_streamlines import afd_map_along_streamlines
+from scilpy.tractanalysis.afd_along_streamlines import afd_map_along_streamlines
 
 EPILOG = """
 Reference:
diff --git a/scripts/scil_compute_mean_fixel_lobe_metric_from_bundles.py b/scripts/scil_compute_mean_fixel_lobe_metric_from_bundles.py
index 3d4568595..691652c57 100755
--- a/scripts/scil_compute_mean_fixel_lobe_metric_from_bundles.py
+++ b/scripts/scil_compute_mean_fixel_lobe_metric_from_bundles.py
@@ -32,7 +32,7 @@
 from scilpy.io.utils import (add_overwrite_arg,
                              add_reference_arg,
                              assert_inputs_exist, assert_outputs_exist)
-from scilpy.reconst.lobe_metrics_along_streamlines \
+from scilpy.tractanalysis.lobe_metrics_along_streamlines \
     import lobe_specific_metric_map_along_streamlines
 
 

From 84d62eebc1023f2c6cd282cac3eec3da92070985 Mon Sep 17 00:00:00 2001
From: karp2601
Date: Mon, 13 Nov 2023 12:14:17 -0500
Subject: [PATCH 08/10] Fixing import length

---
 scripts/scil_compute_mean_fixel_afd_from_bundles.py | 3 ++-
 scripts/scil_compute_mean_fixel_afd_from_hdf5.py    | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/scripts/scil_compute_mean_fixel_afd_from_bundles.py b/scripts/scil_compute_mean_fixel_afd_from_bundles.py
index f7588b610..193c7a662 100755
--- a/scripts/scil_compute_mean_fixel_afd_from_bundles.py
+++ b/scripts/scil_compute_mean_fixel_afd_from_bundles.py
@@ -20,7 +20,8 @@
 from scilpy.io.utils import (add_overwrite_arg, add_sh_basis_args,
                              add_reference_arg, assert_inputs_exist,
                              assert_outputs_exist)
-from scilpy.tractanalysis.afd_along_streamlines import afd_map_along_streamlines
+from scilpy.tractanalysis.afd_along_streamlines \
+    import afd_map_along_streamlines
 
 EPILOG = """
 Reference:
diff --git a/scripts/scil_compute_mean_fixel_afd_from_hdf5.py b/scripts/scil_compute_mean_fixel_afd_from_hdf5.py
index b66342149..16db8a613 100755
--- a/scripts/scil_compute_mean_fixel_afd_from_hdf5.py
+++ b/scripts/scil_compute_mean_fixel_afd_from_hdf5.py
@@ -30,7 +30,8 @@
                              assert_inputs_exist,
                              assert_outputs_exist,
                              validate_nbr_processes)
-from scilpy.tractanalysis.afd_along_streamlines import afd_map_along_streamlines
+from scilpy.tractanalysis.afd_along_streamlines \
+    import afd_map_along_streamlines
 
 EPILOG = """
 Reference:

From 8b2dc29f7ecff4e4498af0c978b08c3f5dfa50b3 Mon Sep 17 00:00:00 2001
From: karp2601
Date: Tue, 21 Nov 2023 10:25:05 -0500
Subject: [PATCH 09/10] pep 8 for Emmanuelle --- scripts/legacy/scil_flip_gradients.py | 2 +- scripts/legacy/scil_resample_bvals.py | 2 +- scripts/legacy/scil_swap_gradients.py | 2 +- scripts/scil_gradients_modify_axes.py | 3 ++- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/scripts/legacy/scil_flip_gradients.py b/scripts/legacy/scil_flip_gradients.py index a3585d5a7..06632c46e 100644 --- a/scripts/legacy/scil_flip_gradients.py +++ b/scripts/legacy/scil_flip_gradients.py @@ -7,7 +7,7 @@ DEPRECATION_MSG = """ This script has been renamed scil_gradients_modify_axes.py. -Please change your existing pipelines accordingly. Please note that options +Please change your existing pipelines accordingly. Please note that options have changed, too. """ diff --git a/scripts/legacy/scil_resample_bvals.py b/scripts/legacy/scil_resample_bvals.py index e0a4ef44f..8fcc60b3d 100644 --- a/scripts/legacy/scil_resample_bvals.py +++ b/scripts/legacy/scil_resample_bvals.py @@ -7,7 +7,7 @@ DEPRECATION_MSG = """ This script has been renamed scil_gradients_round_bvals_to_shells.py. -Please change your existing pipelines accordingly. Please note that options +Please change your existing pipelines accordingly. Please note that options have changed, too. """ diff --git a/scripts/legacy/scil_swap_gradients.py b/scripts/legacy/scil_swap_gradients.py index 6ed1b0055..78ead9372 100644 --- a/scripts/legacy/scil_swap_gradients.py +++ b/scripts/legacy/scil_swap_gradients.py @@ -7,7 +7,7 @@ DEPRECATION_MSG = """ This script has been renamed scil_gradients_modify_axes.py. -Please change your existing pipelines accordingly. Please note that options +Please change your existing pipelines accordingly. Please note that options have changed, too. """ diff --git a/scripts/scil_gradients_modify_axes.py b/scripts/scil_gradients_modify_axes.py index 4a7b94fb5..25c6e5955 100755 --- a/scripts/scil_gradients_modify_axes.py +++ b/scripts/scil_gradients_modify_axes.py @@ -11,7 +11,8 @@ import numpy as np from scilpy.gradients.bvec_bval_tools import (flip_gradient_sampling, - swap_gradient_axis, str_to_axis_index) + swap_gradient_axis, + str_to_axis_index) from scilpy.io.utils import (add_overwrite_arg, assert_inputs_exist, assert_outputs_exist) From 6418ea0180cc9eed1c80e8fd292ed720264d4b0c Mon Sep 17 00:00:00 2001 From: karp2601 Date: Tue, 21 Nov 2023 11:28:06 -0500 Subject: [PATCH 10/10] Adding tests and moving reconst.dti to io.tensor --- docs/source/modules/scilpy.reconst.rst | 8 ---- scilpy/{reconst/dti.py => io/tensor.py} | 0 scilpy/reconst/tests/test_aodf.py | 11 ++++++ scilpy/reconst/tests/test_bingham.py | 31 +++++++++++++++ scilpy/reconst/tests/test_divide.py | 21 ++++++++++ scilpy/reconst/tests/test_dwi.py | 6 +++ scilpy/reconst/tests/test_fiber_coherence.py | 11 ++++++ scilpy/reconst/tests/test_fodf.py | 11 ++++++ scilpy/reconst/tests/test_frf.py | 11 ++++++ scilpy/reconst/tests/test_mti.py | 41 ++++++++++++++++++++ scilpy/reconst/tests/test_sh.py | 26 +++++++++++++ scilpy/reconst/tests/test_utils.py | 26 +++++++++++++ scripts/scil_compute_dti_metrics.py | 5 ++- scripts/scil_convert_tensors.py | 6 +-- 14 files changed, 201 insertions(+), 13 deletions(-) rename scilpy/{reconst/dti.py => io/tensor.py} (100%) create mode 100644 scilpy/reconst/tests/test_aodf.py create mode 100644 scilpy/reconst/tests/test_bingham.py create mode 100644 scilpy/reconst/tests/test_divide.py create mode 100644 scilpy/reconst/tests/test_dwi.py create mode 100644 scilpy/reconst/tests/test_fiber_coherence.py create mode 100644 
scilpy/reconst/tests/test_fodf.py
 create mode 100644 scilpy/reconst/tests/test_frf.py
 create mode 100644 scilpy/reconst/tests/test_mti.py
 create mode 100644 scilpy/reconst/tests/test_sh.py
 create mode 100644 scilpy/reconst/tests/test_utils.py

diff --git a/docs/source/modules/scilpy.reconst.rst b/docs/source/modules/scilpy.reconst.rst
index 40c0b66d0..f6b9bed03 100644
--- a/docs/source/modules/scilpy.reconst.rst
+++ b/docs/source/modules/scilpy.reconst.rst
@@ -25,14 +25,6 @@ scilpy.reconst.divide module
    :undoc-members:
    :show-inheritance:
 
-scilpy.reconst.dti module
----------------------------------------------
-
-.. automodule:: scilpy.reconst.dti
-   :members:
-   :undoc-members:
-   :show-inheritance:
-
 scilpy.reconst.dwi module
 ---------------------------------
 
diff --git a/scilpy/reconst/dti.py b/scilpy/io/tensor.py
similarity index 100%
rename from scilpy/reconst/dti.py
rename to scilpy/io/tensor.py
diff --git a/scilpy/reconst/tests/test_aodf.py b/scilpy/reconst/tests/test_aodf.py
new file mode 100644
index 000000000..0a6e1e4cf
--- /dev/null
+++ b/scilpy/reconst/tests/test_aodf.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+
+
+def test_compute_asymmetry_index():
+    # TODO
+    pass
+
+
+def test_compute_odd_power_map():
+    # TODO
+    pass
diff --git a/scilpy/reconst/tests/test_bingham.py b/scilpy/reconst/tests/test_bingham.py
new file mode 100644
index 000000000..a4f855054
--- /dev/null
+++ b/scilpy/reconst/tests/test_bingham.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+
+def test_bingham_to_sf():
+    # TODO
+    pass
+
+
+def test_bingham_to_peak_direction():
+    # TODO
+    pass
+
+
+def test_bingham_fit_sh():
+    # TODO
+    pass
+
+
+def test_compute_fiber_density():
+    # TODO
+    pass
+
+
+def test_compute_fiber_spread():
+    # TODO
+    pass
+
+
+def test_compute_fiber_fraction():
+    # TODO
+    pass
diff --git a/scilpy/reconst/tests/test_divide.py b/scilpy/reconst/tests/test_divide.py
new file mode 100644
index 000000000..6b2c35a93
--- /dev/null
+++ b/scilpy/reconst/tests/test_divide.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+
+def test_gamma_data2fit():
+    # TODO
+    pass
+
+
+def test_gamma_fit2data():
+    # TODO
+    pass
+
+
+def test_gamma_fit2metrics():
+    # TODO
+    pass
+
+
+def test_fit_gamma():
+    # TODO
+    pass
diff --git a/scilpy/reconst/tests/test_dwi.py b/scilpy/reconst/tests/test_dwi.py
new file mode 100644
index 000000000..9d76ba41d
--- /dev/null
+++ b/scilpy/reconst/tests/test_dwi.py
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+
+
+def test_compute_sh_coefficients():
+    # TODO
+    pass
diff --git a/scilpy/reconst/tests/test_fiber_coherence.py b/scilpy/reconst/tests/test_fiber_coherence.py
new file mode 100644
index 000000000..3f6feaa95
--- /dev/null
+++ b/scilpy/reconst/tests/test_fiber_coherence.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+
+
+def test_compute_fiber_coherence_table():
+    # TODO
+    pass
+
+
+def test_compute_fiber_coherence():
+    # TODO
+    pass
diff --git a/scilpy/reconst/tests/test_fodf.py b/scilpy/reconst/tests/test_fodf.py
new file mode 100644
index 000000000..db016b324
--- /dev/null
+++ b/scilpy/reconst/tests/test_fodf.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+
+
+def test_get_ventricles_max_fodf():
+    # TODO
+    pass
+
+
+def test_fit_from_model():
+    # TODO
+    pass
diff --git a/scilpy/reconst/tests/test_frf.py b/scilpy/reconst/tests/test_frf.py
new file mode 100644
index 000000000..6d9db9e32
--- /dev/null
+++ b/scilpy/reconst/tests/test_frf.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+
+
+def test_compute_ssst_frf():
+    # TODO
+    pass
+
+
+def test_compute_msmt_frf():
+    # TODO
+    pass
diff --git a/scilpy/reconst/tests/test_mti.py b/scilpy/reconst/tests/test_mti.py
new file mode 100644
index 000000000..50c06499d
--- /dev/null
+++ b/scilpy/reconst/tests/test_mti.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+
+def test_py_fspecial_gauss():
+    # TODO
+    pass
+
+
+def test_compute_contrasts_maps():
+    # TODO
+    pass
+
+
+def test_compute_saturation():
+    # TODO
+    pass
+
+
+def test_compute_ihMT_maps():
+    # TODO
+    pass
+
+
+def test_compute_MT_maps_from_ihMT():
+    # TODO
+    pass
+
+
+def test_compute_MT_maps():
+    # TODO
+    pass
+
+
+def test_threshold_maps():
+    # TODO
+    pass
+
+
+def test_apply_B1_correction():
+    # TODO
+    pass
diff --git a/scilpy/reconst/tests/test_sh.py b/scilpy/reconst/tests/test_sh.py
new file mode 100644
index 000000000..04a425a5f
--- /dev/null
+++ b/scilpy/reconst/tests/test_sh.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+
+
+def test_compute_rish():
+    # TODO
+    pass
+
+
+def test_peaks_from_sh():
+    # TODO
+    pass
+
+
+def test_maps_from_sh():
+    # TODO
+    pass
+
+
+def test_convert_sh_basis():
+    # TODO
+    pass
+
+
+def test_convert_sh_to_sf():
+    # TODO
+    pass
diff --git a/scilpy/reconst/tests/test_utils.py b/scilpy/reconst/tests/test_utils.py
new file mode 100644
index 000000000..53bd10779
--- /dev/null
+++ b/scilpy/reconst/tests/test_utils.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+
+
+def test_get_sh_order_and_fullness():
+    # TODO
+    pass
+
+
+def test_sh_basis():
+    # TODO
+    pass
+
+
+def test_get_b_matrix():
+    # TODO
+    pass
+
+
+def test_get_maximas():
+    # TODO
+    pass
+
+
+def test_get_sphere_neighbours():
+    # TODO
+    pass
diff --git a/scripts/scil_compute_dti_metrics.py b/scripts/scil_compute_dti_metrics.py
index 0e791391f..c4071d194 100755
--- a/scripts/scil_compute_dti_metrics.py
+++ b/scripts/scil_compute_dti_metrics.py
@@ -42,9 +42,10 @@
 from scilpy.io.image import get_data_as_mask
 from scilpy.io.utils import (add_overwrite_arg, assert_inputs_exist,
                              assert_outputs_exist, add_force_b0_arg)
-from scilpy.reconst.dti import convert_tensor_from_dipy_format, \
+from scilpy.io.tensor import convert_tensor_from_dipy_format, \
     supported_tensor_formats, tensor_format_description
-from scilpy.gradients.bvec_bval_tools import (normalize_bvecs, is_normalized_bvecs,
+from scilpy.gradients.bvec_bval_tools import (normalize_bvecs,
+                                              is_normalized_bvecs,
                                               check_b0_threshold)
 from scilpy.utils.filenames import add_filename_suffix, split_name_with_nii
 
diff --git a/scripts/scil_convert_tensors.py b/scripts/scil_convert_tensors.py
index 0b68ca7e6..1b975f10e 100755
--- a/scripts/scil_convert_tensors.py
+++ b/scripts/scil_convert_tensors.py
@@ -13,9 +13,9 @@
 from scilpy.io.utils import (add_overwrite_arg,
                              assert_inputs_exist,
                              assert_outputs_exist)
-from scilpy.reconst.dti import (supported_tensor_formats,
-                                tensor_format_description,
-                                convert_tensor_format)
+from scilpy.io.tensor import (supported_tensor_formats,
+                              tensor_format_description,
+                              convert_tensor_format)
 
 
 def _build_arg_parser():
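
For scripts that imported tensor helpers from scilpy.reconst.dti, only the
module path changes with this patch; the helpers themselves are untouched. A
minimal sketch of the updated usage (the file name and the chosen format
names are illustrative; valid formats are the ones listed in
supported_tensor_formats):

    import nibabel as nib

    from scilpy.io.tensor import (convert_tensor_format,
                                  supported_tensor_formats)

    # Load a 6-component tensor volume saved in one on-disk convention.
    tensors = nib.load('tensors.nii.gz').get_fdata()

    # Reorder the coefficients to another convention, e.g. 'fsl' to
    # 'mrtrix', assuming both appear in supported_tensor_formats.
    converted = convert_tensor_format(tensors, 'fsl', 'mrtrix')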