From f72500d70dd065778a90fa68f53c2a6c70dcee46 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Thu, 28 Jul 2022 15:28:50 -0400 Subject: [PATCH 01/11] Shift centers of mass into clusters in Jackknife/FocusCounter (#735) * Move centers of mass to nearest in-cluster voxel. * Don't mention COMs outside of clusters. * Convert COMs to ints. * Fix scipy stuff. * Add custom license section. * Add license to TtoZ as well. --- docs/conf.py | 1 + .../misc-notebooks/save_nidm_to_dset.ipynb | 4 +- nimare/diagnostics.py | 36 ++--- nimare/meta/cbma/base.py | 2 +- nimare/meta/cbma/mkda.py | 4 +- nimare/meta/utils.py | 6 +- nimare/tests/test_meta_kernel.py | 2 +- nimare/transforms.py | 21 +++ nimare/utils.py | 135 ++++++++++++++++++ 9 files changed, 175 insertions(+), 36 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index b278fe173..f755d1c8d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -109,6 +109,7 @@ # ----------------------------------------------------------------------------- napoleon_google_docstring = False napoleon_numpy_docstring = True +napoleon_custom_sections = ["License"] napoleon_include_init_with_doc = True napoleon_include_private_with_doc = False napoleon_include_special_with_doc = False diff --git a/examples/misc-notebooks/save_nidm_to_dset.ipynb b/examples/misc-notebooks/save_nidm_to_dset.ipynb index db729d93e..94e6ea552 100755 --- a/examples/misc-notebooks/save_nidm_to_dset.ipynb +++ b/examples/misc-notebooks/save_nidm_to_dset.ipynb @@ -181,7 +181,7 @@ " binarized[binarized>0] = 1\n", " binarized[binarized<0] = 0\n", " binarized = binarized.astype(int)\n", - " labeled = ndimage.measurements.label(binarized, np.ones((3, 3, 3)))[0]\n", + " labeled = ndimage.label(binarized, np.ones((3, 3, 3)))[0]\n", " clust_ids = sorted(list(np.unique(labeled)[1:]))\n", " ijk = np.hstack([np.where(data * (labeled == c) == np.max(data * (labeled == c))) for c in clust_ids])\n", " ijk = ijk.T\n", @@ -259,7 +259,7 @@ " binarized[binarized>0] = 1\n", " binarized[binarized<0] = 0\n", " binarized = binarized.astype(int)\n", - " labeled = ndimage.measurements.label(binarized, np.ones((3, 3, 3)))[0]\n", + " labeled = ndimage.label(binarized, np.ones((3, 3, 3)))[0]\n", " clust_ids = sorted(list(np.unique(labeled)[1:]))\n", " \n", " peak_vals = np.array([np.max(data * (labeled == c)) for c in clust_ids])\n", diff --git a/nimare/diagnostics.py b/nimare/diagnostics.py index ed2250ff5..47c6a8936 100644 --- a/nimare/diagnostics.py +++ b/nimare/diagnostics.py @@ -13,7 +13,7 @@ from tqdm.auto import tqdm from nimare.base import NiMAREBase -from nimare.utils import _check_ncores, mm2vox, tqdm_joblib, vox2mm +from nimare.utils import _check_ncores, _get_cluster_coms, mm2vox, tqdm_joblib, vox2mm LGR = logging.getLogger(__name__) @@ -77,8 +77,6 @@ def transform(self, result): cluster in the thresholded map. There is one row for each experiment, as well as one more row at the top of the table (below the header), which has the center of mass of each cluster. - The centers of mass are not guaranteed to fall within the actual clusters, but can - serve as a useful heuristic for identifying them. There is one column for each cluster, with column names being integers indicating the cluster's associated value in the ``labeled_cluster_img`` output. labeled_cluster_img : :obj:`nibabel.nifti1.Nifti1Image` @@ -135,7 +133,7 @@ def transform(self, result): # Let's label the clusters in the thresholded map so we can use it as a NiftiLabelsMasker # This won't work when the Estimator's masker isn't a NiftiMasker... 
:( conn = ndimage.generate_binary_structure(3, 2) - labeled_cluster_arr, n_clusters = ndimage.measurements.label(thresh_arr, conn) + labeled_cluster_arr, n_clusters = ndimage.label(thresh_arr, conn) labeled_cluster_img = nib.Nifti1Image( labeled_cluster_arr, affine=target_img.affine, @@ -147,19 +145,11 @@ def transform(self, result): contribution_table = pd.DataFrame(index=rows) return contribution_table, labeled_cluster_img - # Identify center of mass for each cluster - # This COM may fall outside the cluster, but it is a useful heuristic for identifying them - cluster_ids = list(range(1, n_clusters + 1)) - cluster_coms = ndimage.center_of_mass( - labeled_cluster_arr, - labeled_cluster_arr, - cluster_ids, - ) - cluster_coms = np.array(cluster_coms) + cluster_coms = _get_cluster_coms(labeled_cluster_arr) cluster_coms = vox2mm(cluster_coms, target_img.affine) cluster_com_strs = [] - for i_peak in range(len(cluster_ids)): + for i_peak in range(cluster_coms.shape[0]): x, y, z = cluster_coms[i_peak, :].astype(int) xyz_str = f"({x}, {y}, {z})" cluster_com_strs.append(xyz_str) @@ -169,7 +159,7 @@ def transform(self, result): cluster_masker.fit(labeled_cluster_img) # Create empty contribution table - contribution_table = pd.DataFrame(index=rows, columns=cluster_ids) + contribution_table = pd.DataFrame(index=rows, columns=list(range(1, n_clusters + 1))) contribution_table.index.name = "Cluster ID" contribution_table.loc["Center of Mass"] = cluster_com_strs @@ -295,8 +285,6 @@ def transform(self, result): cluster in the thresholded map. There is one row for each experiment, as well as one more row at the top of the table (below the header), which has the center of mass of each cluster. - The centers of mass are not guaranteed to fall within the actual clusters, but can - serve as a useful heuristic for identifying them. There is one column for each cluster, with column names being integers indicating the cluster's associated value in the ``labeled_cluster_img`` output. labeled_cluster_img : :obj:`nibabel.nifti1.Nifti1Image` @@ -341,7 +329,7 @@ def transform(self, result): # Let's label the clusters in the thresholded map so we can use it as a NiftiLabelsMasker # This won't work when the Estimator's masker isn't a NiftiMasker... 
:( conn = ndimage.generate_binary_structure(3, 2) - labeled_cluster_arr, n_clusters = ndimage.measurements.label(thresh_arr, conn) + labeled_cluster_arr, n_clusters = ndimage.label(thresh_arr, conn) labeled_cluster_img = nib.Nifti1Image( labeled_cluster_arr, affine=target_img.affine, @@ -353,23 +341,17 @@ def transform(self, result): contribution_table = pd.DataFrame(index=rows) return contribution_table, labeled_cluster_img - # Identify center of mass for each cluster - # This COM may fall outside the cluster, but it is a useful heuristic for identifying them - cluster_ids = list(range(1, n_clusters + 1)) - cluster_coms = ndimage.center_of_mass( - labeled_cluster_arr, labeled_cluster_arr, cluster_ids - ) - cluster_coms = np.array(cluster_coms) + cluster_coms = _get_cluster_coms(labeled_cluster_arr) cluster_coms = vox2mm(cluster_coms, target_img.affine) cluster_com_strs = [] - for i_peak in range(len(cluster_ids)): + for i_peak in range(cluster_coms.shape[0]): x, y, z = cluster_coms[i_peak, :].astype(int) xyz_str = f"({x}, {y}, {z})" cluster_com_strs.append(xyz_str) # Create empty contribution table - contribution_table = pd.DataFrame(index=rows, columns=cluster_ids) + contribution_table = pd.DataFrame(index=rows, columns=list(range(1, n_clusters + 1))) contribution_table.index.name = "Cluster ID" contribution_table.loc["Center of Mass"] = cluster_com_strs diff --git a/nimare/meta/cbma/base.py b/nimare/meta/cbma/base.py index c18dc63e8..354f5b89c 100644 --- a/nimare/meta/cbma/base.py +++ b/nimare/meta/cbma/base.py @@ -751,7 +751,7 @@ def correct_fwe_montecarlo( # cluster-label thresh_stat_values = self.masker.inverse_transform(stat_values).get_fdata() thresh_stat_values[thresh_stat_values <= ss_thresh] = 0 - labeled_matrix, _ = ndimage.measurements.label(thresh_stat_values, conn) + labeled_matrix, _ = ndimage.label(thresh_stat_values, conn) cluster_labels, idx, cluster_sizes = np.unique( labeled_matrix, diff --git a/nimare/meta/cbma/mkda.py b/nimare/meta/cbma/mkda.py index ba4af86ae..735ada5c5 100644 --- a/nimare/meta/cbma/mkda.py +++ b/nimare/meta/cbma/mkda.py @@ -606,9 +606,9 @@ def _apply_correction(self, stat_values, voxel_thresh, vfwe_null, csfwe_null, cm # Label positive and negative clusters separately labeled_matrix = np.empty(stat_map_thresh.shape, int) - labeled_matrix, _ = ndimage.measurements.label(stat_map_thresh > 0, conn) + labeled_matrix, _ = ndimage.label(stat_map_thresh > 0, conn) n_positive_clusters = np.max(labeled_matrix) - temp_labeled_matrix, _ = ndimage.measurements.label(stat_map_thresh < 0, conn) + temp_labeled_matrix, _ = ndimage.label(stat_map_thresh < 0, conn) temp_labeled_matrix[temp_labeled_matrix > 0] += n_positive_clusters labeled_matrix = labeled_matrix + temp_labeled_matrix del temp_labeled_matrix diff --git a/nimare/meta/utils.py b/nimare/meta/utils.py index 0e099bd58..4568b2de7 100755 --- a/nimare/meta/utils.py +++ b/nimare/meta/utils.py @@ -322,7 +322,7 @@ def get_ale_kernel(img, sample_size=None, fwhm=None): data = np.zeros((31, 31, 31)) mid = int(np.floor(data.shape[0] / 2.0)) data[mid, mid, mid] = 1.0 - kernel = ndimage.filters.gaussian_filter(data, sigma_vox, mode="constant") + kernel = ndimage.gaussian_filter(data, sigma_vox, mode="constant") # Crop kernel to drop surrounding zeros mn = np.min(np.where(kernel > np.spacing(1))[0]) @@ -368,12 +368,12 @@ def _calculate_cluster_measures(arr3d, threshold, conn, tail="upper"): arr3d[np.abs(arr3d) <= threshold] = 0 labeled_arr3d = np.empty(arr3d.shape, int) - labeled_arr3d, _ = 
ndimage.measurements.label(arr3d > 0, conn) + labeled_arr3d, _ = ndimage.label(arr3d > 0, conn) if tail == "two": # Label positive and negative clusters separately n_positive_clusters = np.max(labeled_arr3d) - temp_labeled_arr3d, _ = ndimage.measurements.label(arr3d < 0, conn) + temp_labeled_arr3d, _ = ndimage.label(arr3d < 0, conn) temp_labeled_arr3d[temp_labeled_arr3d > 0] += n_positive_clusters labeled_arr3d = labeled_arr3d + temp_labeled_arr3d del temp_labeled_arr3d diff --git a/nimare/tests/test_meta_kernel.py b/nimare/tests/test_meta_kernel.py index 553459e0b..9fe416612 100644 --- a/nimare/tests/test_meta_kernel.py +++ b/nimare/tests/test_meta_kernel.py @@ -2,7 +2,7 @@ import nibabel as nib import numpy as np import pytest -from scipy.ndimage.measurements import center_of_mass +from scipy.ndimage import center_of_mass from nimare.meta import kernel from nimare.utils import get_masker, get_template, mm2vox diff --git a/nimare/transforms.py b/nimare/transforms.py index 952f6909d..1434a2068 100644 --- a/nimare/transforms.py +++ b/nimare/transforms.py @@ -717,6 +717,27 @@ def t_to_z(t_values, dof): z_values : array_like Z-statistics + License + ------- + The MIT License (MIT) + Copyright (c) 2015 Vanessa Sochat + + Permission is hereby granted, free of charge, to any person obtaining a copy of this software + and associated documentation files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all copies or + substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, + INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE + FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + References ---------- .. footbibliography:: diff --git a/nimare/utils.py b/nimare/utils.py index 66d95311b..e0eca68d6 100755 --- a/nimare/utils.py +++ b/nimare/utils.py @@ -15,6 +15,7 @@ import numpy as np import pandas as pd from nilearn.input_data import NiftiMasker +from scipy import ndimage from nimare import references from nimare.due import due @@ -1025,6 +1026,8 @@ def unique_rows(ar, return_counts=False): array([[0, 1, 0], [1, 0, 1]], dtype=uint8) + License + ------- Copyright (C) 2019, the scikit-image team All rights reserved. """ @@ -1046,3 +1049,135 @@ def unique_rows(ar, return_counts=False): _, unique_row_indices = np.unique(ar_row_view, return_index=True) return ar[unique_row_indices] + + +def _cluster_nearest_neighbor(ijk, labels_index, labeled): + """Find the nearest neighbor for given points in the corresponding cluster. + + Parameters + ---------- + ijk : :obj:`numpy.ndarray` + (n_pts, 3) array of query points. + labels_index : :obj:`numpy.ndarray` + (n_pts,) array of corresponding cluster indices. + labeled : :obj:`numpy.ndarray` + 3D array with voxels labeled according to cluster index. + + Returns + ------- + nbrs : :obj:`numpy.ndarray` + (n_pts, 3) nearest neighbor points. 
+ + This function is partially derived from Nilearn's code. + + License + ------- + New BSD License + + Copyright (c) 2007 - 2022 The nilearn developers. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + a. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + b. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + c. Neither the name of the nilearn developers nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + DAMAGE. + """ + labels = labeled[labeled > 0] + clusters_ijk = np.array(labeled.nonzero()).T + nbrs = np.zeros_like(ijk) + for ii, (lab, point) in enumerate(zip(labels_index, ijk)): + lab_ijk = clusters_ijk[labels == lab] + dist = np.linalg.norm(lab_ijk - point, axis=1) + nbrs[ii] = lab_ijk[np.argmin(dist)] + + return nbrs + + +def _get_cluster_coms(labeled_cluster_arr): + """Get the center of mass of each cluster in a labeled array. + + This function is partially derived from Nilearn's code. + + License + ------- + New BSD License + + Copyright (c) 2007 - 2022 The nilearn developers. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + a. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + b. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + c. Neither the name of the nilearn developers nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + DAMAGE. + """ + cluster_ids = np.unique(labeled_cluster_arr)[1:] + n_clusters = cluster_ids.size + + # Identify center of mass for each cluster + # This COM may fall outside the cluster, but it is a useful heuristic for identifying them + cluster_ids = np.arange(1, n_clusters + 1, dtype=int) + cluster_coms = ndimage.center_of_mass(labeled_cluster_arr, labeled_cluster_arr, cluster_ids) + cluster_coms = np.array(cluster_coms).astype(int) + + # NOTE: The following comes from Nilearn + # Determine if all subpeaks are within the cluster + # They may not be if the cluster is binary and has a shape where the COM is + # outside the cluster, like a donut. + coms_outside_clusters = ( + labeled_cluster_arr[cluster_coms[:, 0], cluster_coms[:, 1], cluster_coms[:, 2]] + != cluster_ids + ) + if np.any(coms_outside_clusters): + LGR.warning( + "Attention: At least one of the centers of mass falls outside of the cluster body. " + "Identifying the nearest in-cluster voxel." + ) + + # Replace centers of mass with their nearest neighbor points in the + # corresponding clusters. Note this is also equivalent to computing the + # centers of mass constrained to points within the cluster. + cluster_coms[coms_outside_clusters, :] = _cluster_nearest_neighbor( + cluster_coms[coms_outside_clusters, :], + cluster_ids[coms_outside_clusters], + labeled_cluster_arr, + ) + + return cluster_coms From e07673da64ff068fb7a6c5bc0b87ef3198f657ee Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Thu, 28 Jul 2022 15:31:35 -0400 Subject: [PATCH 02/11] Remove duecredit in favor of BibTeX references (#736) * Remove duecredit and references submodules. * Remove duecredit dependencies. 
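
For reference, the convention that replaces the ``due.dcite`` decorators is a NumPy-style "References" docstring section rendered with sphinxcontrib-bibtex, as in the ``t_to_z`` docstring updated in PATCH 01 above. A minimal sketch of the pattern follows; the ``:footcite:t:`` role is an assumption about the Sphinx setup, and ``turkeltaub2002meta`` is simply a BibTeX key visible in the removed ``references.py``:

    def example_method(coords):
        """Run some meta-analytic step.

        The approach was introduced in :footcite:t:`turkeltaub2002meta`.

        References
        ----------
        .. footbibliography::
        """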
--- .codecov.yml | 2 - nimare/annotate/cogat.py | 4 - nimare/annotate/gclda.py | 7 -- nimare/annotate/lda.py | 7 -- nimare/decode/continuous.py | 4 - nimare/decode/discrete.py | 8 -- nimare/decode/encode.py | 3 - nimare/due.py | 65 ------------ nimare/meta/cbma/ale.py | 19 ---- nimare/meta/cbma/mkda.py | 6 -- nimare/meta/kernel.py | 10 -- nimare/meta/utils.py | 3 - nimare/references.py | 198 ------------------------------------ nimare/transforms.py | 4 - nimare/utils.py | 23 ----- pyproject.toml | 1 - setup.cfg | 5 +- 17 files changed, 1 insertion(+), 368 deletions(-) delete mode 100644 nimare/due.py delete mode 100644 nimare/references.py diff --git a/.codecov.yml b/.codecov.yml index ea9b6c7e5..1e82480d9 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -16,6 +16,4 @@ coverage: ignore: - 'nimare/tests/' - - 'nimare/due.py' - 'nimare/_version.py' - - 'nimare/references.py' diff --git a/nimare/annotate/cogat.py b/nimare/annotate/cogat.py index a6264598a..1f26d6dd5 100755 --- a/nimare/annotate/cogat.py +++ b/nimare/annotate/cogat.py @@ -5,16 +5,13 @@ import numpy as np import pandas as pd -from nimare import references from nimare.annotate import utils -from nimare.due import due from nimare.extract import download_cognitive_atlas from nimare.utils import _uk_to_us LGR = logging.getLogger(__name__) -@due.dcite(references.COGNITIVE_ATLAS, description="Introduces the Cognitive Atlas.") class CogAtLemmatizer(object): """Replace synonyms and abbreviations with Cognitive Atlas identifiers in text. @@ -94,7 +91,6 @@ def transform(self, text, convert_uk=True): return text -@due.dcite(references.COGNITIVE_ATLAS, description="Introduces the Cognitive Atlas.") def extract_cogat(text_df, id_df=None, text_column="abstract"): """Extract Cognitive Atlas terms and count instances using regular expressions. diff --git a/nimare/annotate/gclda.py b/nimare/annotate/gclda.py index 658870421..4668f8b6b 100755 --- a/nimare/annotate/gclda.py +++ b/nimare/annotate/gclda.py @@ -8,15 +8,12 @@ from nilearn._utils import load_niimg from scipy.stats import multivariate_normal -from nimare import references from nimare.base import NiMAREBase -from nimare.due import due from nimare.utils import get_template LGR = logging.getLogger(__name__) -@due.dcite(references.GCLDAMODEL) class GCLDAModel(NiMAREBase): """Generate a generalized correspondence latent Dirichlet allocation (GCLDA) topic model. @@ -717,10 +714,6 @@ def _update_regions(self): self.topics["regions_mu"][i_topic, j_region, ...] = mu self.topics["regions_sigma"][i_topic, j_region, ...] = sigma - @due.dcite( - references.LOG_LIKELIHOOD, - description="Describes method for computing log-likelihood used in model.", - ) def compute_log_likelihood(self, model=None, update_vectors=True): """Compute log-likelihood of a model object given current model. diff --git a/nimare/annotate/lda.py b/nimare/annotate/lda.py index 513f5eee6..f113cc7cf 100644 --- a/nimare/annotate/lda.py +++ b/nimare/annotate/lda.py @@ -2,17 +2,10 @@ import pandas as pd from sklearn.decomposition import LatentDirichletAllocation -from nimare import references from nimare.annotate.text import generate_counts from nimare.base import NiMAREBase -from nimare.due import due -@due.dcite(references.LDA, description="Introduces LDA.") -@due.dcite( - references.LDAMODEL, - description="First use of LDA for automated annotation of neuroimaging literature.", -) class LDAModel(NiMAREBase): """Generate a latent Dirichlet allocation (LDA) topic model. 
diff --git a/nimare/decode/continuous.py b/nimare/decode/continuous.py index 11fa7861e..afd161e38 100755 --- a/nimare/decode/continuous.py +++ b/nimare/decode/continuous.py @@ -8,10 +8,8 @@ from nilearn.masking import apply_mask from tqdm.auto import tqdm -from nimare import references from nimare.decode.base import Decoder from nimare.decode.utils import weight_priors -from nimare.due import due from nimare.meta.cbma.base import CBMAEstimator from nimare.meta.cbma.mkda import MKDAChi2 from nimare.stats import pearson @@ -20,7 +18,6 @@ LGR = logging.getLogger(__name__) -@due.dcite(references.GCLDA_DECODING, description="Describes decoding methods using GC-LDA.") def gclda_decode_map(model, image, topic_priors=None, prior_weight=1): r"""Perform image-to-text decoding for continuous inputs using method from Rubin et al. (2017). @@ -110,7 +107,6 @@ def gclda_decode_map(model, image, topic_priors=None, prior_weight=1): return decoded_df, topic_weights -@due.dcite(references.NEUROSYNTH, description="Introduces Neurosynth.") class CorrelationDecoder(Decoder): """Decode an unthresholded image by correlating the image with meta-analytic maps. diff --git a/nimare/decode/discrete.py b/nimare/decode/discrete.py index cd0606d3a..ffb5ee86f 100755 --- a/nimare/decode/discrete.py +++ b/nimare/decode/discrete.py @@ -6,17 +6,14 @@ from scipy import special from scipy.stats import binom -from nimare import references from nimare.decode.base import Decoder from nimare.decode.utils import weight_priors -from nimare.due import due from nimare.meta.kernel import KernelTransformer, MKDAKernel from nimare.stats import one_way, pearson, two_way from nimare.transforms import p_to_z from nimare.utils import _check_type, get_masker -@due.dcite(references.GCLDA_DECODING, description="Citation for GCLDA decoding.") def gclda_decode_roi(model, roi, topic_priors=None, prior_weight=1.0): r"""Perform image-to-text decoding for discrete inputs using method from Rubin et al. (2017). @@ -113,7 +110,6 @@ def gclda_decode_roi(model, roi, topic_priors=None, prior_weight=1.0): return decoded_df, topic_weights -@due.dcite(references.BRAINMAP_DECODING, description="Citation for BrainMap-style decoding.") class BrainMapDecoder(Decoder): """Perform image-to-text decoding for discrete inputs according to the BrainMap method. @@ -212,7 +208,6 @@ def transform(self, ids, ids2=None): return results -@due.dcite(references.BRAINMAP_DECODING, description="Citation for BrainMap-style decoding.") def brainmap_decode( coordinates, annotations, @@ -388,7 +383,6 @@ def brainmap_decode( return out_df -@due.dcite(references.NEUROSYNTH, description="Introduces Neurosynth.") class NeurosynthDecoder(Decoder): """Perform discrete functional decoding according to Neurosynth's meta-analytic method. @@ -499,7 +493,6 @@ def transform(self, ids, ids2=None): return results -@due.dcite(references.NEUROSYNTH, description="Introduces Neurosynth.") def neurosynth_decode( coordinates, annotations, @@ -672,7 +665,6 @@ def neurosynth_decode( return out_df -@due.dcite(references.NEUROSYNTH, description="Introduces Neurosynth.") class ROIAssociationDecoder(Decoder): """Perform discrete functional decoding according to Neurosynth's ROI association method. 
diff --git a/nimare/decode/encode.py b/nimare/decode/encode.py index 8355e6215..b4f335009 100755 --- a/nimare/decode/encode.py +++ b/nimare/decode/encode.py @@ -3,12 +3,9 @@ from nilearn.masking import unmask from sklearn.feature_extraction.text import CountVectorizer -from nimare import references from nimare.decode.utils import weight_priors -from nimare.due import due -@due.dcite(references.GCLDA_DECODING, description="Citation for GCLDA encoding.") def gclda_encode(model, text, out_file=None, topic_priors=None, prior_weight=1.0): r"""Perform text-to-image encoding according to the method described in Rubin et al. (2017). diff --git a/nimare/due.py b/nimare/due.py deleted file mode 100644 index 743142a8c..000000000 --- a/nimare/due.py +++ /dev/null @@ -1,65 +0,0 @@ -""" -Stub file for a guaranteed safe import of duecredit constructs: if duecredit -is not available. -To use it, place it into your project codebase to be imported, e.g. copy as - cp stub.py /path/tomodule/module/due.py -Note that it might be better to avoid naming it duecredit.py to avoid shadowing -installed duecredit. -Then use in your code as - from nimare.due import due, Doi, BibTeX -See https://github.com/duecredit/duecredit/blob/master/README.md for examples. -Origin: Originally a part of the duecredit -Copyright: 2015-2016 DueCredit developers -License: BSD-2 -""" - -__version__ = "0.0.5" - - -class InactiveDueCreditCollector(object): - """Just a stub at the Collector which would not do anything""" - - def _donothing(self, *args, **kwargs): - """Perform no good and no bad""" - pass - - def dcite(self, *args, **kwargs): - """If I could cite I would""" - - def nondecorating_decorator(func): - return func - - return nondecorating_decorator - - cite = load = add = _donothing - - def __repr__(self): - return self.__class__.__name__ + "()" - - -def _donothing_func(*args, **kwargs): - """Perform no good and no bad""" - pass - - -try: - from duecredit import BibTeX, Doi, Url, due - - if "due" in locals() and not hasattr(due, "cite"): - raise RuntimeError("Imported due lacks .cite. DueCredit is now disabled") -except Exception as e: - if type(e).__name__ != "ImportError": - import logging - - logging.getLogger("duecredit").error("Failed to import duecredit due to %s" % str(e)) - # Initiate due stub - due = InactiveDueCreditCollector() - BibTeX = Doi = Url = _donothing_func - -# Emacs mode definitions -# Local Variables: -# mode: python -# py-indent-offset: 4 -# tab-width: 4 -# indent-tabs-mode: nil -# End: diff --git a/nimare/meta/cbma/ale.py b/nimare/meta/cbma/ale.py index abe41766e..989f8d661 100755 --- a/nimare/meta/cbma/ale.py +++ b/nimare/meta/cbma/ale.py @@ -7,8 +7,6 @@ from joblib import Parallel, delayed from tqdm.auto import tqdm -from nimare import references -from nimare.due import due from nimare.meta.cbma.base import CBMAEstimator, PairwiseCBMAEstimator from nimare.meta.kernel import ALEKernel from nimare.stats import null_to_p, nullhist_to_p @@ -18,19 +16,6 @@ LGR = logging.getLogger(__name__) -@due.dcite(references.ALE1, description="Introduces ALE.") -@due.dcite( - references.ALE2, - description="Modifies ALE algorithm to eliminate within-experiment " - "effects and generate MA maps based on subject group " - "instead of experiment.", -) -@due.dcite( - references.ALE3, - description="Modifies ALE algorithm to allow FWE correction and to " - "more quickly and accurately generate the null " - "distribution for significance testing.", -) class ALE(CBMAEstimator): """Activation likelihood estimation. 
@@ -500,10 +485,6 @@ def correct_fwe_montecarlo(self): ) -@due.dcite( - references.SCALE, - description=("Introduces the specific co-activation likelihood estimation (SCALE) algorithm."), -) class SCALE(CBMAEstimator): r"""Specific coactivation likelihood estimation. diff --git a/nimare/meta/cbma/mkda.py b/nimare/meta/cbma/mkda.py index 735ada5c5..517a824bc 100644 --- a/nimare/meta/cbma/mkda.py +++ b/nimare/meta/cbma/mkda.py @@ -10,8 +10,6 @@ from scipy.stats import chi2 from tqdm.auto import tqdm -from nimare import references -from nimare.due import due from nimare.meta.cbma.base import CBMAEstimator, PairwiseCBMAEstimator from nimare.meta.kernel import KDAKernel, MKDAKernel from nimare.meta.utils import _calculate_cluster_measures @@ -22,7 +20,6 @@ LGR = logging.getLogger(__name__) -@due.dcite(references.MKDA, description="Introduces MKDA.") class MKDADensity(CBMAEstimator): r"""Multilevel kernel density analysis- Density analysis. @@ -249,7 +246,6 @@ def _compute_null_approximate(self, ma_maps): self.null_distributions_["histweights_corr-none_method-approximate"] = ss_hist -@due.dcite(references.MKDA, description="Introduces MKDA.") class MKDAChi2(PairwiseCBMAEstimator): r"""Multilevel kernel density analysis- Chi-square analysis. @@ -942,8 +938,6 @@ def correct_fdr_indep(self, result, alpha=0.05): return images -@due.dcite(references.KDA1, description="Introduces the KDA algorithm.") -@due.dcite(references.KDA2, description="Also introduces the KDA algorithm.") class KDA(CBMAEstimator): r"""Kernel density analysis. diff --git a/nimare/meta/kernel.py b/nimare/meta/kernel.py index eb238f467..3dbbafe7f 100644 --- a/nimare/meta/kernel.py +++ b/nimare/meta/kernel.py @@ -15,9 +15,7 @@ import pandas as pd import sparse -from nimare import references from nimare.base import NiMAREBase -from nimare.due import due from nimare.meta.utils import compute_ale_ma, compute_kda_ma, get_ale_kernel from nimare.utils import _add_metadata_to_dataframe, _safe_transform, mm2vox @@ -259,14 +257,6 @@ def _transform(self, mask, coordinates): pass -@due.dcite( - references.ALE2, - description=( - "Modifies ALE algorithm to eliminate within-experiment " - "effects and generate MA maps based on subject group " - "instead of experiment." - ), -) class ALEKernel(KernelTransformer): """Generate ALE modeled activation images from coordinates and sample size. 
diff --git a/nimare/meta/utils.py b/nimare/meta/utils.py index 4568b2de7..9d75ddd7f 100755 --- a/nimare/meta/utils.py +++ b/nimare/meta/utils.py @@ -6,8 +6,6 @@ import sparse from scipy import ndimage -from nimare import references -from nimare.due import due from nimare.utils import unique_rows LGR = logging.getLogger(__name__) @@ -297,7 +295,6 @@ def compute_ale_ma(mask, ijks, kernel=None, exp_idx=None, sample_sizes=None, use return kernel_data -@due.dcite(references.ALE_KERNEL, description="Introduces sample size-dependent kernels to ALE.") def get_ale_kernel(img, sample_size=None, fwhm=None): """Estimate 3D Gaussian and sigma (in voxels) for ALE kernel given sample size or fwhm.""" if sample_size is not None and fwhm is not None: diff --git a/nimare/references.py b/nimare/references.py deleted file mode 100644 index a9d9f28a9..000000000 --- a/nimare/references.py +++ /dev/null @@ -1,198 +0,0 @@ -"""References to be imported and injected at relevant places throughout the library.""" -from nimare.due import BibTeX, Doi - -TEXT2BRAIN = Doi("https://doi.org/10.1007/978-3-030-00931-1_67") - -WORD2BRAIN = Doi("10.1101/299024") - -BOLTZMANNMODEL = BibTeX( - """ - @article{DBLP:journals/corr/MontiLLAM16, - author = {Ricardo Pio Monti and Romy Lorenz and Robert Leech and - Christoforos Anagnostopoulos and Giovanni Montana}, - title = {Text-mining the NeuroSynth corpus using Deep Boltzmann - Machines}, - journal = {CoRR}, - volume = {abs/1605.00223}, - year = {2016}, - url = {http://arxiv.org/abs/1605.00223}, - archivePrefix = {arXiv}, - eprint = {1605.00223}, - timestamp = {Wed, 07 Jun 2017 14:42:40 +0200}, - biburl = {https://dblp.org/rec/bib/journals/corr/MontiLLAM16}, - bibsource = {dblp computer science bibliography, https://dblp.org}} - """ -) - -GCLDAMODEL = Doi("10.1371/journal.pcbi.1005649") - -LDA = BibTeX( - """ - @article{blei2003latent, - title={Latent dirichlet allocation}, - author={Blei, David M and Ng, Andrew Y and Jordan, Michael I}, - journal={Journal of machine Learning research}, - volume={3}, - number={Jan}, - pages={993--1022}, - year={2003}} - """ -) - -MALLET = BibTeX( - """ - @article{mallettoolbox, - title={MALLET: A Machine Learning for Language Toolkit.}, - author={McCallum, Andrew K}, - year={2002}} - """ -) - -LDAMODEL = Doi("10.1371/journal.pcbi.1002707") - -COGNITIVE_ATLAS = Doi("10.3389/fninf.2011.00017") - -COGNITIVE_PARADIGM_ONTOLOGY = Doi("10.1007/s12021-011-9126-x") - -ATHENA = Doi("10.3389/fnins.2019.00494") - -LOG_LIKELIHOOD = Doi("10.1145/1577069.1755845") - -GCLDA_DECODING = Doi("10.1371/journal.pcbi.1005649") - -NEUROSYNTH = Doi("10.1038/nmeth.1635") - -BRAINMAP_DECODING = Doi("10.1007/s00429-013-0698-0") - -ALE1 = BibTeX( - """ - @article{turkeltaub2002meta, - title={Meta-analysis of the functional neuroanatomy of single-word - reading: method and validation}, - author={Turkeltaub, Peter E and Eden, Guinevere F and Jones, - Karen M and Zeffiro, Thomas A}, - journal={Neuroimage}, - volume={16}, - number={3}, - pages={765--780}, - year={2002}, - publisher={Elsevier} - } - """ -) - -ALE2 = Doi("10.1002/hbm.21186") - -ALE3 = Doi("10.1016/j.neuroimage.2011.09.017") - -ALE_KERNEL = Doi("10.1002/hbm.20718") - -SCALE = Doi("10.1016/j.neuroimage.2014.06.007") - -MKDA = Doi("10.1093/scan/nsm015") - -KDA1 = Doi("10.1016/S1053-8119(03)00078-8") - -KDA2 = Doi("10.1016/j.neuroimage.2004.03.052") - -BHICP = Doi("10.1198/jasa.2011.ap09735") - -HPGRF = BibTeX( - """ - @article{kang2014bayesian, - title={A Bayesian hierarchical spatial point process model for - 
multi-type neuroimaging meta-analysis}, - author={Kang, Jian and Nichols, Thomas E and Wager, Tor D and - Johnson, Timothy D}, - journal={The annals of applied statistics}, - volume={8}, - number={3}, - pages={1800}, - year={2014}, - publisher={NIH Public Access} - } - """ -) - -SBLFR = Doi("10.1111/biom.12713") - -SBR = Doi("10.1214/11-AOAS523") - -PEAKS2MAPS = Doi("10.7490/f1000research.1116395.1") - -FISHERS = BibTeX( - """ - @article{fisher1932statistical, - title={Statistical methods for research workers, Edinburgh: - Oliver and Boyd, 1925}, - author={Fisher, RA}, - journal={Google Scholar}, - year={1932} - } - """ -) - -STOUFFERS = BibTeX( - """ - @article{stouffer1949american, - title={The American soldier: Adjustment during army life. (Studies - in social psychology in World War II), Vol. 1}, - author={Stouffer, Samuel A and Suchman, Edward A and DeVinney, - Leland C and Star, Shirley A and Williams Jr, Robin M}, - year={1949}, - publisher={Princeton Univ. Press} - } - """ -) - -WEIGHTED_STOUFFERS = BibTeX( - """ - @article{zaykin2011optimally, - title={Optimally weighted Z-test is a powerful method for - combining probabilities in meta-analysis}, - author={Zaykin, Dmitri V}, - journal={Journal of evolutionary biology}, - volume={24}, - number={8}, - pages={1836--1841}, - year={2011}, - publisher={Wiley Online Library} - } - """ -) - -CBP = Doi("10.1002/hbm.22138") - -MAMP = Doi("10.1016/j.neuroimage.2015.08.027") - -MAPBOT = Doi("10.1016/j.neuroimage.2017.06.032") - -T2Z_TRANSFORM = BibTeX( - """ - @article{hughett2007accurate, - title={Accurate Computation of the F-to-z and t-to-z Transforms - for Large Arguments}, - author={Hughett, Paul and others}, - journal={Journal of Statistical Software}, - volume={23}, - number={1}, - pages={1--5}, - year={2007}, - publisher={Foundation for Open Access Statistics} - } - """ -) - -T2Z_IMPLEMENTATION = Doi("10.5281/zenodo.32508") - -LANCASTER_TRANSFORM = Doi("10.1002/hbm.20345") - -LANCASTER_TRANSFORM_VALIDATION = Doi("10.1016/j.neuroimage.2010.02.048") - -META_CLUSTER = Doi("10.1016/j.neuroimage.2015.06.044") - -META_CLUSTER2 = Doi("10.1162/netn_a_00050") - -META_ICA = Doi("10.1162/jocn_a_00077") - -META_ICA2 = Doi("10.1162/jocn_a_00077") diff --git a/nimare/transforms.py b/nimare/transforms.py index 1434a2068..793a7ffa4 100644 --- a/nimare/transforms.py +++ b/nimare/transforms.py @@ -11,9 +11,7 @@ from nilearn.reporting import get_clusters_table from scipy import stats -from nimare import references from nimare.base import NiMAREBase -from nimare.due import due from nimare.utils import _dict_to_coordinates, _dict_to_df, _listify, get_masker LGR = logging.getLogger(__name__) @@ -695,8 +693,6 @@ def p_to_z(p, tail="two"): return z -@due.dcite(references.T2Z_TRANSFORM, description="Introduces T-to-Z transform.") -@due.dcite(references.T2Z_IMPLEMENTATION, description="Python implementation of T-to-Z transform.") def t_to_z(t_values, dof): """Convert t-statistics to z-statistics. 
diff --git a/nimare/utils.py b/nimare/utils.py
index e0eca68d6..1e33eaab7 100755
--- a/nimare/utils.py
+++ b/nimare/utils.py
@@ -17,9 +17,6 @@
 from nilearn.input_data import NiftiMasker
 from scipy import ndimage
 
-from nimare import references
-from nimare.due import due
-
 LGR = logging.getLogger(__name__)
 
 
@@ -207,16 +204,6 @@ def mm2vox(xyz, affine):
     return ijk
 
 
-@due.dcite(
-    references.LANCASTER_TRANSFORM,
-    description="Introduces the Lancaster MNI-to-Talairach transform, "
-    "as well as its inverse, the Talairach-to-MNI "
-    "transform.",
-)
-@due.dcite(
-    references.LANCASTER_TRANSFORM_VALIDATION,
-    description="Validates the Lancaster MNI-to-Talairach and Talairach-to-MNI transforms.",
-)
 def tal2mni(coords):
     """Convert coordinates from Talairach space to MNI space.
 
@@ -285,16 +272,6 @@ def tal2mni(coords):
     return out_coords
 
 
-@due.dcite(
-    references.LANCASTER_TRANSFORM,
-    description="Introduces the Lancaster MNI-to-Talairach transform, "
-    "as well as its inverse, the Talairach-to-MNI "
-    "transform.",
-)
-@due.dcite(
-    references.LANCASTER_TRANSFORM_VALIDATION,
-    description="Validates the Lancaster MNI-to-Talairach and Talairach-to-MNI transforms.",
-)
 def mni2tal(coords):
     """Convert coordinates from MNI space to Talairach space.
 
diff --git a/pyproject.toml b/pyproject.toml
index 456cb6d10..f90b323dd 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -20,7 +20,6 @@ exclude = '''
   )/
   | versioneer.py
   | nimare/_version.py
-  | nimare/due.py
 )
 '''
 
diff --git a/setup.cfg b/setup.cfg
index 4d01dbd1f..59d103597 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -82,8 +82,6 @@ tests =
     flake8-isort
     pytest
     pytest-cov
-duecredit =
-    duecredit
 minimum =
     indexed_gzip==1.4
     nibabel==3.0
@@ -94,7 +92,6 @@ minimum =
     scikit-learn==0.22
     scipy==1.5  # 1.6 drops Python 3.6 support
 all =
-    %(duecredit)s
     %(doc)s
     %(tests)s
 
@@ -120,7 +117,7 @@ parentdir_prefix =
 
 [flake8]
 max-line-length = 99
-exclude = *build/,_version.py,due.py
+exclude = *build/,_version.py
 putty-ignore =
     */__init__.py : +F401
 ignore = E203,E402,E722,W503

From d42910c25dc9925883adc62265d8e0729a70ee75 Mon Sep 17 00:00:00 2001
From: Taylor Salo
Date: Fri, 29 Jul 2022 11:15:11 -0400
Subject: [PATCH 03/11] Add `tables` attribute to MetaResult class (#734)

* Add tables attribute to MetaResult.
* Add tables to IBMA estimators.
* Fill out other Estimators.
* Fix PermutedOLS.
* Add tables to the transformation step.
* More fixing.
* Fix things. Again.
* Fix things again.
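
As a usage sketch of the new attribute (the estimator choice, dataset variable, and output directory are illustrative, not taken from this diff):

    from nimare.meta.ibma import WeightedLeastSquares

    meta = WeightedLeastSquares()
    result = meta.fit(img_dset)  # img_dset: an image-based Dataset you already have

    # Maps remain 1D arrays keyed by name; tables are pandas DataFrames.
    print(result.tables["level-estimator"])  # single-row table holding tau2
    result.save_tables(output_dir="wls_results", prefix="wls")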
--- nimare/base.py | 4 +- nimare/correct.py | 22 +++++++---- nimare/meta/cbma/ale.py | 10 ++--- nimare/meta/cbma/base.py | 12 +++--- nimare/meta/cbma/mkda.py | 20 +++++----- nimare/meta/ibma.py | 41 +++++++++++--------- nimare/results.py | 75 ++++++++++++++++++++++++++++++++++--- nimare/workflows/conperm.py | 2 +- 8 files changed, 130 insertions(+), 56 deletions(-) diff --git a/nimare/base.py b/nimare/base.py index b1ed307bb..088d3ac14 100644 --- a/nimare/base.py +++ b/nimare/base.py @@ -328,11 +328,11 @@ def fit(self, dataset, drop_invalid=True): """ self._collect_inputs(dataset, drop_invalid=drop_invalid) self._preprocess_input(dataset) - maps = self._fit(dataset) + maps, tables = self._fit(dataset) if hasattr(self, "masker") and self.masker is not None: masker = self.masker else: masker = dataset.masker - return MetaResult(self, masker, maps) + return MetaResult(self, mask=masker, maps=maps, tables=tables) diff --git a/nimare/correct.py b/nimare/correct.py index 24b703ace..14485cd4a 100644 --- a/nimare/correct.py +++ b/nimare/correct.py @@ -153,7 +153,7 @@ def transform(self, result): Returns ------- result : :obj:`~nimare.results.MetaResult` - MetaResult with new corrected maps added. + MetaResult with new corrected maps and tables added. """ correction_method = f"correct_{self._correction_method}_{self.method}" @@ -172,15 +172,18 @@ def transform(self, result): "Using correction method implemented in Estimator: " f"{est.__class__.__module__}.{est.__class__.__name__}.{correction_method}." ) - corr_maps = getattr(est, correction_method)(result, **self.parameters) + corr_maps, corr_tables = getattr(est, correction_method)(result, **self.parameters) else: self._collect_inputs(result) - corr_maps = self._transform(result, method=correction_method) + corr_maps, corr_tables = self._transform(result, method=correction_method) # Update corrected map names and add them to maps dict corr_maps = {(k + self._name_suffix): v for k, v in corr_maps.items()} result.maps.update(corr_maps) + corr_tables = {(k + self._name_suffix): v for k, v in corr_tables.items()} + result.tables.update(corr_tables) + # Update the estimator as well, in order to retain updated null distributions result.estimator = est @@ -208,6 +211,9 @@ def _transform(self, result, method): The map names must _not_ include the ``_name_suffix``:, as that will be added in ``transform()`` (i.e., return "p" not "p_corr-FDR_q-0.05_method-indep"). + corr_tables : :obj:`dict` + An empty dictionary meant to contain any tables (pandas DataFrames) produced by the + correction procedure. 
""" p = result.maps["p"] @@ -217,7 +223,7 @@ def _transform(self, result, method): p_no_nans = p[nonnan_mask] # Call the correction method - p_corr_no_nans = getattr(self, method)(p_no_nans) + p_corr_no_nans, tables = getattr(self, method)(p_no_nans) # Unmask the corrected p values based on the NaN mask p_corr[nonnan_mask] = p_corr_no_nans @@ -225,7 +231,7 @@ def _transform(self, result, method): # Create a dictionary of the corrected results corr_maps = {"p": p_corr} self._generate_secondary_maps(result, corr_maps) - return corr_maps + return corr_maps, tables class FWECorrector(Corrector): @@ -289,7 +295,7 @@ def correct_fwe_bonferroni(self, p): -------- nimare.stats.bonferroni """ - return bonferroni(p) + return bonferroni(p), {} class FDRCorrector(Corrector): @@ -357,7 +363,7 @@ def correct_fdr_indep(self, p): -------- pymare.stats.fdr """ - return fdr(p, q=self.alpha, method="bh") + return fdr(p, q=self.alpha, method="bh"), {} def correct_fdr_negcorr(self, p): """Perform Benjamini-Yekutieli FDR correction. @@ -397,4 +403,4 @@ def correct_fdr_negcorr(self, p): -------- pymare.stats.fdr """ - return fdr(p, q=self.alpha, method="by") + return fdr(p, q=self.alpha, method="by"), {} diff --git a/nimare/meta/cbma/ale.py b/nimare/meta/cbma/ale.py index 989f8d661..80e5c97df 100755 --- a/nimare/meta/cbma/ale.py +++ b/nimare/meta/cbma/ale.py @@ -418,13 +418,13 @@ def _fit(self, dataset1, dataset2): z_arr = p_to_z(p_values, tail="two") * diff_signs logp_arr = -np.log10(p_values) - images = { + maps = { "stat_desc-group1MinusGroup2": diff_ale_values, "p_desc-group1MinusGroup2": p_values, "z_desc-group1MinusGroup2": z_arr, "logp_desc-group1MinusGroup2": logp_arr, } - return images + return maps, {} def _compute_summarystat_est(self, ma_values): stat_values = 1.0 - np.prod(1.0 - ma_values, axis=0) @@ -640,9 +640,9 @@ def _fit(self, dataset): logp_values = -np.log10(p_values) logp_values[np.isinf(logp_values)] = -np.log10(np.finfo(float).eps) - # Write out unthresholded value images - images = {"stat": stat_values, "logp": logp_values, "z": z_values} - return images + # Write out unthresholded value maps + maps = {"stat": stat_values, "logp": logp_values, "z": z_values} + return maps, {} def _compute_summarystat_est(self, data): """Generate ALE-value array and null distribution from a list of contrasts. diff --git a/nimare/meta/cbma/base.py b/nimare/meta/cbma/base.py index 354f5b89c..e1d1cff4a 100644 --- a/nimare/meta/cbma/base.py +++ b/nimare/meta/cbma/base.py @@ -188,7 +188,7 @@ def _fit(self, dataset): p_values, z_values = self._summarystat_to_p(stat_values, null_method=self.null_method) images = {"stat": stat_values, "p": p_values, "z": z_values} - return images + return images, {} def _compute_weights(self, ma_values): """Perform optional weight computation routine. @@ -815,14 +815,14 @@ def correct_fwe_montecarlo( if vfwe_only: # Return unthresholded value images - images = { + maps = { "logp_level-voxel": logp_vfwe_values, "z_level-voxel": z_vfwe_values, } else: # Return unthresholded value images - images = { + maps = { "logp_level-voxel": logp_vfwe_values, "z_level-voxel": z_vfwe_values, "logp_desc-size_level-cluster": logp_csfwe_values, @@ -831,7 +831,7 @@ def correct_fwe_montecarlo( "z_desc-mass_level-cluster": z_cmfwe_values, } - return images + return maps, {} class PairwiseCBMAEstimator(CBMAEstimator): @@ -908,11 +908,11 @@ def fit(self, dataset1, dataset2, drop_invalid=True): self.inputs_["coordinates2"] = self.inputs_.pop("coordinates") # Now run the Estimator-specific _fit() method. 
- maps = self._fit(dataset1, dataset2) + maps, tables = self._fit(dataset1, dataset2) if hasattr(self, "masker") and self.masker is not None: masker = self.masker else: masker = dataset1.masker - return MetaResult(self, masker, maps) + return MetaResult(self, mask=masker, maps=maps, tables=tables) diff --git a/nimare/meta/cbma/mkda.py b/nimare/meta/cbma/mkda.py index 517a824bc..392c79500 100644 --- a/nimare/meta/cbma/mkda.py +++ b/nimare/meta/cbma/mkda.py @@ -432,7 +432,7 @@ def _fit(self, dataset1, dataset2): del pFgA_sign, pAgU - images = { + maps = { "prob_desc-A": pA, "prob_desc-AgF": pAgF, "prob_desc-FgA": pFgA, @@ -445,7 +445,7 @@ def _fit(self, dataset1, dataset2): "p_desc-consistency": pAgF_p_vals, "p_desc-specificity": pFgA_p_vals, } - return images + return maps, {} def _run_fwe_permutation(self, iter_xyz1, iter_xyz2, iter_df1, iter_df2, conn, voxel_thresh): """Run a single permutation of the Monte Carlo FWE correction procedure. @@ -664,8 +664,8 @@ def correct_fwe_montecarlo(self, result, voxel_thresh=0.001, n_iters=5000, n_cor Returns ------- - images : :obj:`dict` - Dictionary of 1D arrays corresponding to masked images generated by + maps : :obj:`dict` + Dictionary of 1D arrays corresponding to masked maps generated by the correction procedure. The following arrays are generated by this method: @@ -861,7 +861,7 @@ def correct_fwe_montecarlo(self, result, voxel_thresh=0.001, n_iters=5000, n_cor pFgA_logp_csfwe_vals = -np.log10(pFgA_p_csfwe_vals) pFgA_logp_csfwe_vals[np.isinf(pFgA_logp_csfwe_vals)] = -np.log10(eps) - images = { + maps = { # Consistency analysis "p_desc-consistency_level-voxel": pAgF_p_vfwe_vals, "z_desc-consistency_level-voxel": pAgF_z_vfwe_vals, @@ -883,7 +883,7 @@ def correct_fwe_montecarlo(self, result, voxel_thresh=0.001, n_iters=5000, n_cor "z_desc-specificitySize_level-cluster": pFgA_z_csfwe_vals, "logp_desc-specificitySize_level-cluster": pFgA_logp_csfwe_vals, } - return images + return maps, {} def correct_fdr_indep(self, result, alpha=0.05): """Perform FDR correction using the Benjamini-Hochberg method. @@ -903,8 +903,8 @@ def correct_fdr_indep(self, result, alpha=0.05): Returns ------- - images : :obj:`dict` - Dictionary of 1D arrays corresponding to masked images generated by + maps : :obj:`dict` + Dictionary of 1D arrays corresponding to masked maps generated by the correction procedure. The following arrays are generated by this method: 'z_desc-consistency_level-voxel' and 'z_desc-specificity_level-voxel'. 
@@ -931,11 +931,11 @@ def correct_fdr_indep(self, result, alpha=0.05): pFgA_p_FDR = fdr(pFgA_p_vals, q=alpha, method="bh") pFgA_z_FDR = p_to_z(pFgA_p_FDR, tail="two") * pFgA_sign - images = { + maps = { "z_desc-consistency_level-voxel": pAgF_z_FDR, "z_desc-specificity_level-voxel": pFgA_z_FDR, } - return images + return maps, {} class KDA(CBMAEstimator): diff --git a/nimare/meta/ibma.py b/nimare/meta/ibma.py index 3538c7f58..8a7d9eccd 100755 --- a/nimare/meta/ibma.py +++ b/nimare/meta/ibma.py @@ -5,6 +5,7 @@ import nibabel as nib import numpy as np +import pandas as pd import pymare from nilearn._utils.niimg_conversions import _check_same_fov from nilearn.image import concat_imgs, resample_to_img @@ -178,11 +179,11 @@ def _fit(self, dataset): est = pymare.estimators.FisherCombinationTest() est.fit_dataset(pymare_dset) est_summary = est.summary() - results = { + maps = { "z": _boolean_unmask(est_summary.z.squeeze(), self.inputs_["aggressive_mask"]), "p": _boolean_unmask(est_summary.p.squeeze(), self.inputs_["aggressive_mask"]), } - return results + return maps, {} class Stouffers(IBMAEstimator): @@ -261,11 +262,11 @@ def _fit(self, dataset): est.fit_dataset(pymare_dset) est_summary = est.summary() - results = { + maps = { "z": _boolean_unmask(est_summary.z.squeeze(), self.inputs_["aggressive_mask"]), "p": _boolean_unmask(est_summary.p.squeeze(), self.inputs_["aggressive_mask"]), } - return results + return maps, {} class WeightedLeastSquares(IBMAEstimator): @@ -349,13 +350,17 @@ def _fit(self, dataset): fe_stats = est_summary.get_fe_stats() # tau2 is an float, not a map, so it can't go in the results dictionary - results = { + maps = { "z": _boolean_unmask(fe_stats["z"].squeeze(), self.inputs_["aggressive_mask"]), "p": _boolean_unmask(fe_stats["p"].squeeze(), self.inputs_["aggressive_mask"]), "est": _boolean_unmask(fe_stats["est"].squeeze(), self.inputs_["aggressive_mask"]), "se": _boolean_unmask(fe_stats["se"].squeeze(), self.inputs_["aggressive_mask"]), } - return results + + tables = { + "level-estimator": pd.DataFrame(columns=["tau2"], data=[self.tau2]), + } + return maps, tables class DerSimonianLaird(IBMAEstimator): @@ -426,14 +431,14 @@ def _fit(self, dataset): est_summary = est.summary() fe_stats = est_summary.get_fe_stats() - results = { + maps = { "z": _boolean_unmask(fe_stats["z"].squeeze(), self.inputs_["aggressive_mask"]), "p": _boolean_unmask(fe_stats["p"].squeeze(), self.inputs_["aggressive_mask"]), "est": _boolean_unmask(fe_stats["est"].squeeze(), self.inputs_["aggressive_mask"]), "se": _boolean_unmask(fe_stats["se"].squeeze(), self.inputs_["aggressive_mask"]), "tau2": _boolean_unmask(est_summary.tau2.squeeze(), self.inputs_["aggressive_mask"]), } - return results + return maps, {} class Hedges(IBMAEstimator): @@ -503,14 +508,14 @@ def _fit(self, dataset): est.fit_dataset(pymare_dset) est_summary = est.summary() fe_stats = est_summary.get_fe_stats() - results = { + maps = { "z": _boolean_unmask(fe_stats["z"].squeeze(), self.inputs_["aggressive_mask"]), "p": _boolean_unmask(fe_stats["p"].squeeze(), self.inputs_["aggressive_mask"]), "est": _boolean_unmask(fe_stats["est"].squeeze(), self.inputs_["aggressive_mask"]), "se": _boolean_unmask(fe_stats["se"].squeeze(), self.inputs_["aggressive_mask"]), "tau2": _boolean_unmask(est_summary.tau2.squeeze(), self.inputs_["aggressive_mask"]), } - return results + return maps, {} class SampleSizeBasedLikelihood(IBMAEstimator): @@ -595,7 +600,7 @@ def _fit(self, dataset): est.fit_dataset(pymare_dset) est_summary = est.summary() fe_stats = 
est_summary.get_fe_stats() - results = { + maps = { "z": _boolean_unmask(fe_stats["z"].squeeze(), self.inputs_["aggressive_mask"]), "p": _boolean_unmask(fe_stats["p"].squeeze(), self.inputs_["aggressive_mask"]), "est": _boolean_unmask(fe_stats["est"].squeeze(), self.inputs_["aggressive_mask"]), @@ -606,7 +611,7 @@ def _fit(self, dataset): self.inputs_["aggressive_mask"], ), } - return results + return maps, {} class VarianceBasedLikelihood(IBMAEstimator): @@ -701,14 +706,14 @@ def _fit(self, dataset): est.fit_dataset(pymare_dset) est_summary = est.summary() fe_stats = est_summary.get_fe_stats() - results = { + maps = { "z": _boolean_unmask(fe_stats["z"].squeeze(), self.inputs_["aggressive_mask"]), "p": _boolean_unmask(fe_stats["p"].squeeze(), self.inputs_["aggressive_mask"]), "est": _boolean_unmask(fe_stats["est"].squeeze(), self.inputs_["aggressive_mask"]), "se": _boolean_unmask(fe_stats["se"].squeeze(), self.inputs_["aggressive_mask"]), "tau2": _boolean_unmask(est_summary.tau2.squeeze(), self.inputs_["aggressive_mask"]), } - return results + return maps, {} class PermutedOLS(IBMAEstimator): @@ -790,11 +795,11 @@ def _fit(self, dataset): # Convert t to z, preserving signs dof = self.parameters_["tested_vars"].shape[0] - self.parameters_["tested_vars"].shape[1] z_map = t_to_z(t_map, dof) - images = { + maps = { "t": _boolean_unmask(t_map.squeeze(), self.inputs_["aggressive_mask"]), "z": _boolean_unmask(z_map.squeeze(), self.inputs_["aggressive_mask"]), } - return images + return maps, {} def correct_fwe_montecarlo(self, result, n_iters=10000, n_cores=1): """Perform FWE correction using the max-value permutation method. @@ -859,10 +864,10 @@ def correct_fwe_montecarlo(self, result, n_iters=10000, n_cores=1): sign = np.sign(t_map) sign[sign == 0] = 1 z_map = p_to_z(p_map, tail="two") * sign - images = { + maps = { "logp_level-voxel": _boolean_unmask( log_p_map.squeeze(), self.inputs_["aggressive_mask"] ), "z_level-voxel": _boolean_unmask(z_map.squeeze(), self.inputs_["aggressive_mask"]), } - return images + return maps, {} diff --git a/nimare/results.py b/nimare/results.py index 5c4c2a92d..25cb034ff 100644 --- a/nimare/results.py +++ b/nimare/results.py @@ -3,6 +3,8 @@ import logging import os +import numpy as np +import pandas as pd from nibabel.funcs import squeeze_image from nimare.utils import get_masker @@ -19,8 +21,10 @@ class MetaResult(object): The Estimator used to generate the maps in the MetaResult. mask : Niimg-like or `nilearn.input_data.base_masker.BaseMasker` Mask for converting maps between arrays and images. - maps : :obj:`dict` or None, optional - Maps to store in the object. Default is None. + maps : None or :obj:`dict` of :obj:`numpy.ndarray`, optional + Maps to store in the object. The maps must be provided as 1D numpy arrays. Default is None. + tables : None or :obj:`dict` of :obj:`pandas.DataFrame`, optional + Pandas DataFrames to store in the object. Default is None. Attributes ---------- @@ -29,13 +33,32 @@ class MetaResult(object): masker : :class:`~nilearn.input_data.NiftiMasker` or similar Masker object. maps : :obj:`dict` - Keys are map names and values are arrays. + Keys are map names and values are 1D arrays. + tables : :obj:`dict` + Keys are table levels and values are pandas DataFrames. 
""" - def __init__(self, estimator, mask, maps=None): + def __init__(self, estimator, mask, maps=None, tables=None): self.estimator = copy.deepcopy(estimator) self.masker = get_masker(mask) - self.maps = maps or {} + + maps = maps or {} + tables = tables or {} + + for map_name, map_ in maps.items(): + if not isinstance(map_, np.ndarray): + raise ValueError(f"Maps must be numpy arrays. '{map_name}' is a {type(map_)}") + + if map_.ndim != 1: + LGR.warning(f"Map '{map_name}' should be 1D, not {map_.ndim}D. Squeezing.") + map_ = np.squeeze(map_) + + for table_name, table in tables.items(): + if not isinstance(table, pd.DataFrame): + raise ValueError(f"Tables must be DataFrames. '{table_name}' is a {type(table)}") + + self.maps = maps + self.tables = tables def get_map(self, name, return_type="image"): """Get stored map as image or array. @@ -94,7 +117,47 @@ def save_maps(self, output_dir=".", prefix="", prefix_sep="_", names=None): outpath = os.path.join(output_dir, filename) img.to_filename(outpath) + def save_tables(self, output_dir=".", prefix="", prefix_sep="_", names=None): + """Save result tables to TSV files. + + Parameters + ---------- + output_dir : :obj:`str`, optional + Output directory in which to save results. If the directory doesn't + exist, it will be created. Default is current directory. + prefix : :obj:`str`, optional + Prefix to prepend to output file names. + Default is None. + prefix_sep : :obj:`str`, optional + Separator to add between prefix and default file names. + Default is _. + names : None or :obj:`list` of :obj:`str`, optional + Names of specific tables to write out. If None, save all tables. + Default is None. + """ + if prefix == "": + prefix_sep = "" + + if not prefix.endswith(prefix_sep): + prefix = prefix + prefix_sep + + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + names = names or list(self.tables.keys()) + tables = {k: self.tables[k] for k in names} + + for tabletype, table in tables.items(): + filename = prefix + tables + ".tsv" + outpath = os.path.join(output_dir, filename) + table.to_csv(outpath, sep="\t", index=False) + def copy(self): """Return copy of result object.""" - new = MetaResult(self.estimator, self.masker, copy.deepcopy(self.maps)) + new = MetaResult( + self.estimator, + mask=self.masker, + maps=copy.deepcopy(self.maps), + tables=copy.deepcopy(self.tables), + ) return new diff --git a/nimare/workflows/conperm.py b/nimare/workflows/conperm.py index 6a9c19ea9..254edd742 100644 --- a/nimare/workflows/conperm.py +++ b/nimare/workflows/conperm.py @@ -69,7 +69,7 @@ def conperm_workflow(contrast_images, mask_image=None, output_dir=None, prefix=" ) res = {"logp": log_p_map, "t": t_map} # The t_test function will stand in for the Estimator in the results object - res = MetaResult(permuted_ols, mask_image, maps=res) + res = MetaResult(permuted_ols, mask=mask_image, maps=res, tables={}) boilerplate = boilerplate.format( n_studies=n_studies, From 87c3ce30c59382605fd141c6149be25be742be96 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Fri, 29 Jul 2022 12:44:13 -0400 Subject: [PATCH 04/11] Add FocusFilter class for removing coordinates outside of a mask (#732) * Draft FocusFilter. * Support mask in init. * Add FocusFilter to API docs. * Add a test. 
* Update diagnostics.py --- docs/api.rst | 1 + nimare/diagnostics.py | 75 +++++++++++++++++++++++++++++++- nimare/tests/test_diagnostics.py | 25 ++++++++--- 3 files changed, 95 insertions(+), 6 deletions(-) diff --git a/docs/api.rst b/docs/api.rst index 134909da7..a9120444d 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -94,6 +94,7 @@ For more information about the components of coordinate-based meta-analysis in N :toctree: generated/ :template: class.rst + diagnostics.FocusFilter diagnostics.Jackknife diagnostics.FocusCounter diff --git a/nimare/diagnostics.py b/nimare/diagnostics.py index 47c6a8936..21bb8233e 100644 --- a/nimare/diagnostics.py +++ b/nimare/diagnostics.py @@ -13,7 +13,14 @@ from tqdm.auto import tqdm from nimare.base import NiMAREBase -from nimare.utils import _check_ncores, _get_cluster_coms, mm2vox, tqdm_joblib, vox2mm +from nimare.utils import ( + _check_ncores, + _get_cluster_coms, + get_masker, + mm2vox, + tqdm_joblib, + vox2mm, +) LGR = logging.getLogger(__name__) @@ -389,3 +396,69 @@ def _transform(self, expid, coordinates_df, labeled_cluster_map, affine): focus_counts.append(n_included_voxels) return expid, focus_counts + + +class FocusFilter(NiMAREBase): + """Remove coordinates outside of the Dataset's mask from the Dataset. + + .. versionadded:: 0.0.13 + + Parameters + ---------- + mask : :obj:`str`, :class:`~nibabel.nifti1.Nifti1Image`, \ + :class:`~nilearn.maskers.NiftiMasker` or similar, or None, optional + Mask(er) to use. If None, uses the masker of the Dataset provided in ``transform``. + + Notes + ----- + This filter removes any coordinates outside of the brain mask. + It does not remove studies without coordinates in the brain mask, since a Dataset does not + need to have coordinates for all studies (e.g., some may only have images). + """ + + def __init__(self, mask=None): + if mask is not None: + mask = get_masker(mask) + + self.masker = mask + + def transform(self, dataset): + """Apply the filter to a Dataset. + + Parameters + ---------- + dataset : :obj:`~nimare.dataset.Dataset` + The Dataset to filter. + + Returns + ------- + dataset : :obj:`~nimare.dataset.Dataset` + The filtered Dataset. + """ + masker = self.masker or dataset.masker + + # Get matrix indices for in-brain voxels in the mask + mask_ijk = np.vstack(np.where(masker.mask_img.get_fdata())).T + + # Get matrix indices for Dataset coordinates + dset_xyz = dataset.coordinates[["x", "y", "z"]].values + + # mm2vox automatically rounds the coordinates + dset_ijk = mm2vox(dset_xyz, masker.mask_img.affine) + + keep_idx = [] + for i, coord in enumerate(dset_ijk): + # Check if each coordinate in Dataset is within the mask + # If it is, log that coordinate in keep_idx + if len(np.where((mask_ijk == coord).all(axis=1))[0]): + keep_idx.append(i) + + LGR.info( + f"{dset_ijk.shape[0] - len(keep_idx)}/{dset_ijk.shape[0]} coordinates fall outside of " + "the mask. Removing them." 
+ ) + + # Only retain coordinates inside the brain mask + dataset.coordinates = dataset.coordinates.iloc[keep_idx] + + return dataset diff --git a/nimare/tests/test_diagnostics.py b/nimare/tests/test_diagnostics.py index 3502cf0d9..4b33fd252 100644 --- a/nimare/tests/test_diagnostics.py +++ b/nimare/tests/test_diagnostics.py @@ -4,7 +4,7 @@ import pytest from nilearn.input_data import NiftiLabelsMasker -from nimare.diagnostics import FocusCounter, Jackknife +from nimare import diagnostics from nimare.meta import cbma, ibma from nimare.tests.utils import get_test_data_path @@ -42,7 +42,7 @@ def test_jackknife_smoke( else: res = meta.fit(testdata) - jackknife = Jackknife(target_image=target_image, voxel_thresh=1.65) + jackknife = diagnostics.Jackknife(target_image=target_image, voxel_thresh=1.65) if n_samples == "twosample": with pytest.raises(AttributeError): @@ -64,13 +64,13 @@ def test_jackknife_with_custom_masker_smoke(testdata_ibma): meta = ibma.SampleSizeBasedLikelihood(mask=masker) res = meta.fit(testdata_ibma) - jackknife = Jackknife(target_image="z", voxel_thresh=0.5) + jackknife = diagnostics.Jackknife(target_image="z", voxel_thresh=0.5) cluster_table, labeled_img = jackknife.transform(res) assert cluster_table.shape[0] == len(meta.inputs_["id"]) + 1 # A Jackknife with a target_image that isn't present in the MetaResult raises a ValueError. with pytest.raises(ValueError): - jackknife = Jackknife(target_image="doggy", voxel_thresh=0.5) + jackknife = diagnostics.Jackknife(target_image="doggy", voxel_thresh=0.5) jackknife.transform(res) @@ -99,7 +99,7 @@ def test_focuscounter_smoke( else: res = meta.fit(testdata) - counter = FocusCounter(target_image=target_image, voxel_thresh=1.65) + counter = diagnostics.FocusCounter(target_image=target_image, voxel_thresh=1.65) if n_samples == "twosample": with pytest.raises(AttributeError): @@ -107,3 +107,18 @@ def test_focuscounter_smoke( else: cluster_table, labeled_img = counter.transform(res) assert cluster_table.shape[0] == len(meta.inputs_["id"]) + 1 + + +def test_focusfilter(testdata_laird): + """Ensure that the FocusFilter removes out-of-mask coordinates. + + The Laird dataset contains 16 foci outside of the MNI brain mask, which the filter should + remove. + """ + n_coordinates_all = testdata_laird.coordinates.shape[0] + ffilter = diagnostics.FocusFilter() + filtered_dset = ffilter.transform(testdata_laird) + n_coordinates_filtered = filtered_dset.coordinates.shape[0] + assert n_coordinates_all == 1117 + assert n_coordinates_filtered == 1101 + assert n_coordinates_filtered <= n_coordinates_all From 245b844bd68d1bdf7685eb1b2e2b4309bcb3d5d7 Mon Sep 17 00:00:00 2001 From: JulioAPeraza Date: Wed, 3 Aug 2022 10:26:57 -0400 Subject: [PATCH 05/11] Add `n_cores` parameter to use for parallelization. 
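Looping back to the MetaResult changes in PATCH 03 above: the new maps/tables support can be
exercised directly. A minimal sketch, with stand-ins for the estimator and the table contents::

    import numpy as np
    import pandas as pd
    from nilearn.datasets import load_mni152_brain_mask

    from nimare.results import MetaResult

    maps = {"z": np.zeros(1000)}  # maps must be 1D arrays; in real use, length matches the mask
    tables = {"clusters": pd.DataFrame({"Cluster ID": [1, 2]})}  # stand-in table

    # None stands in for a fitted Estimator here
    result = MetaResult(None, mask=load_mni152_brain_mask(), maps=maps, tables=tables)

    # Each table is written to its own TSV, named after its dictionary key
    result.save_tables(output_dir="results", prefix="pain_meta")
    # -> results/pain_meta_clusters.tsv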
--- nimare/decode/continuous.py | 127 +++++++++++++++++++++++------------- 1 file changed, 80 insertions(+), 47 deletions(-) diff --git a/nimare/decode/continuous.py b/nimare/decode/continuous.py index afd161e38..07da5f1a7 100755 --- a/nimare/decode/continuous.py +++ b/nimare/decode/continuous.py @@ -4,6 +4,7 @@ import numpy as np import pandas as pd +from joblib import Parallel, delayed from nilearn._utils import load_niimg from nilearn.masking import apply_mask from tqdm.auto import tqdm @@ -13,7 +14,7 @@ from nimare.meta.cbma.base import CBMAEstimator from nimare.meta.cbma.mkda import MKDAChi2 from nimare.stats import pearson -from nimare.utils import _check_type, _safe_transform +from nimare.utils import _check_ncores, _check_type, _safe_transform, tqdm_joblib LGR = logging.getLogger(__name__) @@ -110,6 +111,10 @@ def gclda_decode_map(model, image, topic_priors=None, prior_weight=1): class CorrelationDecoder(Decoder): """Decode an unthresholded image by correlating the image with meta-analytic maps. + .. versionchanged:: 0.0.13 + + * New parameter: `n_cores`. Number of cores to use for parallelization. + .. versionchanged:: 0.0.12 * Remove low-memory option in favor of sparse arrays. @@ -126,6 +131,10 @@ class CorrelationDecoder(Decoder): Meta-analysis estimator. Default is :class:`~nimare.meta.mkda.MKDAChi2`. target_image : :obj:`str` Name of meta-analysis results image to use for decoding. + n_cores : :obj:`int`, optional + Number of cores to use for parallelization. + If <=0, defaults to using all available cores. + Default is 1. Warnings -------- @@ -146,6 +155,7 @@ def __init__( frequency_threshold=0.001, meta_estimator=None, target_image="z_desc-specificity", + n_cores=1, ): if meta_estimator is None: @@ -158,6 +168,7 @@ def __init__( self.frequency_threshold = frequency_threshold self.meta_estimator = meta_estimator self.target_image = target_image + self.n_cores = _check_ncores(n_cores) def _fit(self, dataset): """Generate feature-specific meta-analytic maps for dataset. 
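From the user's side, the new parameter is just another knob on the decoder. A hedged sketch,
with placeholder file and feature-group names::

    from nimare.dataset import Dataset
    from nimare.decode.continuous import CorrelationDecoder

    dset = Dataset("neurosynth_dataset.json")  # placeholder path

    decoder = CorrelationDecoder(
        feature_group="terms_abstract_tfidf",  # placeholder feature group
        frequency_threshold=0.001,
        n_cores=4,  # new in 0.0.13: run the feature-wise meta-analyses in parallel
    )
    decoder.fit(dset)

    # Pearson correlation between the input map and each feature's meta-analytic map
    decoded_df = decoder.transform("my_unthresholded_zmap.nii.gz")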
@@ -179,36 +190,41 @@ def _fit(self, dataset): self.masker = dataset.masker n_features = len(self.features_) - for i_feature, feature in enumerate(tqdm(self.features_, total=n_features)): - feature_ids = dataset.get_studies_by_label( - labels=[feature], - label_threshold=self.frequency_threshold, + images_ = [[]] * n_features + with tqdm_joblib(tqdm(total=n_features)): + Parallel(n_jobs=self.n_cores)( + delayed(self._run_fit)(i_feature, feature, dataset, images_) + for i_feature, feature in enumerate(self.features_) ) - # Limit selected studies to studies with valid data - feature_ids = sorted(list(set(feature_ids).intersection(self.inputs_["id"]))) - - # Create the reduced Dataset - feature_dset = dataset.slice(feature_ids) - - # Check if the meta method is a pairwise estimator - # This seems like a somewhat inelegant solution - if "dataset2" in inspect.getfullargspec(self.meta_estimator.fit).args: - nonfeature_ids = sorted(list(set(self.inputs_["id"]) - set(feature_ids))) - nonfeature_dset = dataset.slice(nonfeature_ids) - meta_results = self.meta_estimator.fit(feature_dset, nonfeature_dset) - else: - meta_results = self.meta_estimator.fit(feature_dset) - - feature_data = meta_results.get_map( - self.target_image, - return_type="array", - ) - if i_feature == 0: - images_ = np.zeros((len(self.features_), len(feature_data)), feature_data.dtype) - images_[i_feature, :] = feature_data + self.images_ = np.vstack(images_) - self.images_ = images_ + def _run_fit(self, i_feature, feature, dataset, images_): + feature_ids = dataset.get_studies_by_label( + labels=[feature], + label_threshold=self.frequency_threshold, + ) + # Limit selected studies to studies with valid data + feature_ids = sorted(list(set(feature_ids).intersection(self.inputs_["id"]))) + + # Create the reduced Dataset + feature_dset = dataset.slice(feature_ids) + + # Check if the meta method is a pairwise estimator + # This seems like a somewhat inelegant solution + if "dataset2" in inspect.getfullargspec(self.meta_estimator.fit).args: + nonfeature_ids = sorted(list(set(self.inputs_["id"]) - set(feature_ids))) + nonfeature_dset = dataset.slice(nonfeature_ids) + meta_results = self.meta_estimator.fit(feature_dset, nonfeature_dset) + else: + meta_results = self.meta_estimator.fit(feature_dset) + + feature_data = meta_results.get_map( + self.target_image, + return_type="array", + ) + + images_[i_feature] = feature_data def transform(self, img): """Correlate target image with each feature-specific meta-analytic map. @@ -233,6 +249,10 @@ def transform(self, img): class CorrelationDistributionDecoder(Decoder): """Decode an unthresholded image by correlating the image with study-wise images. + .. versionchanged:: 0.0.13 + + * New parameter: `n_cores`. Number of cores to use for parallelization. + Parameters ---------- feature_group : :obj:`str`, optional @@ -243,6 +263,10 @@ class CorrelationDistributionDecoder(Decoder): Frequency threshold. Default is 0.001. target_image : {'z', 'con'}, optional Name of meta-analysis results image to use for decoding. Default is 'z'. + n_cores : :obj:`int`, optional + Number of cores to use for parallelization. + If <=0, defaults to using all available cores. + Default is 1. 
Warnings -------- @@ -261,11 +285,13 @@ def __init__( features=None, frequency_threshold=0.001, target_image="z", + n_cores=1, ): self.feature_group = feature_group self.features = features self.frequency_threshold = frequency_threshold self._required_inputs["images"] = ("image", target_image) + self.n_cores = _check_ncores(n_cores) def _fit(self, dataset): """Collect sets of maps from the Dataset corresponding to each requested feature. @@ -286,31 +312,38 @@ def _fit(self, dataset): """ self.masker = dataset.masker + n_features = len(self.features_) images_ = {} - for feature in self.features_: - feature_ids = dataset.get_studies_by_label( - labels=[feature], label_threshold=self.frequency_threshold + with tqdm_joblib(tqdm(total=n_features)): + Parallel(n_jobs=self.n_cores)( + delayed(self._run_fit)(feature, dataset, images_) for feature in self.features_ ) - selected_ids = sorted(list(set(feature_ids).intersection(self.inputs_["id"]))) - selected_id_idx = [ - i_id for i_id, id_ in enumerate(self.inputs_["id"]) if id_ in selected_ids - ] - test_imgs = [ - img for i_img, img in enumerate(self.inputs_["images"]) if i_img in selected_id_idx - ] - if len(test_imgs): - feature_arr = _safe_transform( - test_imgs, - self.masker, - memfile=None, - ) - images_[feature] = feature_arr - else: - LGR.info(f"Skipping feature '{feature}'. No images found.") + # reduce features again self.features_ = [f for f in self.features_ if f in images_.keys()] self.images_ = images_ + def _run_fit(self, feature, dataset, images_): + feature_ids = dataset.get_studies_by_label( + labels=[feature], label_threshold=self.frequency_threshold + ) + selected_ids = sorted(list(set(feature_ids).intersection(self.inputs_["id"]))) + selected_id_idx = [ + i_id for i_id, id_ in enumerate(self.inputs_["id"]) if id_ in selected_ids + ] + test_imgs = [ + img for i_img, img in enumerate(self.inputs_["images"]) if i_img in selected_id_idx + ] + if len(test_imgs): + feature_arr = _safe_transform( + test_imgs, + self.masker, + memfile=None, + ) + images_[feature] = feature_arr + else: + LGR.info(f"Skipping feature '{feature}'. No images found.") + def transform(self, img): """Correlate target image with each map associated with each feature. From 38cb335a9c85ca9b75c3b9165243ee5eb3de084a Mon Sep 17 00:00:00 2001 From: JulioAPeraza Date: Thu, 4 Aug 2022 14:50:41 -0400 Subject: [PATCH 06/11] Add returns to parallel functions --- .../01_datasets/02_download_neurosynth.py | 2 +- nimare/decode/base.py | 2 +- nimare/decode/continuous.py | 29 ++++++++++--------- 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/examples/01_datasets/02_download_neurosynth.py b/examples/01_datasets/02_download_neurosynth.py index 447b4a5e0..bba3efc15 100644 --- a/examples/01_datasets/02_download_neurosynth.py +++ b/examples/01_datasets/02_download_neurosynth.py @@ -86,7 +86,7 @@ version="1", overwrite=False, source="combined", - vocab="neuroquery7547", + vocab="neuroquery6308", type="tfidf", ) # Note that the files are saved to a new folder within "out_dir" named "neuroquery". 
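The decoder changes in the hunks below exist because joblib's default loky backend runs each
task in a separate process: in-place edits to a caller-side list or dict land on a pickled copy
and are silently thrown away. A self-contained sketch of the failure mode and the return-based
fix::

    from joblib import Parallel, delayed

    def fill_in_place(i, out):
        out[i] = i ** 2  # mutates the worker's copy, not the parent's list

    def return_result(i):
        return i, i ** 2  # ship the result back through the return value

    out = [None] * 4
    Parallel(n_jobs=2)(delayed(fill_in_place)(i, out) for i in range(4))
    print(out)  # [None, None, None, None] under the default loky backend

    results = dict(Parallel(n_jobs=2)(delayed(return_result)(i) for i in range(4)))
    print(results)  # {0: 0, 1: 1, 2: 4, 3: 9}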
diff --git a/nimare/decode/base.py b/nimare/decode/base.py
index 7c401ff38..456975286 100644
--- a/nimare/decode/base.py
+++ b/nimare/decode/base.py
@@ -77,7 +77,7 @@ def _preprocess_input(self, dataset):
         if not len(features):
             raise Exception("No features identified in Dataset!")
         elif len(features) < n_features_orig:
-            LGR.info(f"Retaining {len(features)}/({n_features_orig} features.")
+            LGR.info(f"Retaining {len(features)}/{n_features_orig} features.")
 
         self.features_ = features
 
diff --git a/nimare/decode/continuous.py b/nimare/decode/continuous.py
index 07da5f1a7..ce723a4f6 100755
--- a/nimare/decode/continuous.py
+++ b/nimare/decode/continuous.py
@@ -190,16 +190,18 @@ def _fit(self, dataset):
         self.masker = dataset.masker
 
         n_features = len(self.features_)
-        images_ = [[]] * n_features
         with tqdm_joblib(tqdm(total=n_features)):
-            Parallel(n_jobs=self.n_cores)(
-                delayed(self._run_fit)(i_feature, feature, dataset, images_)
-                for i_feature, feature in enumerate(self.features_)
+            images_, feature_idx = zip(
+                *Parallel(n_jobs=self.n_cores)(
+                    delayed(self._run_fit)(i_feature, feature, dataset)
+                    for i_feature, feature in enumerate(self.features_)
+                )
             )
+        # Sort the images by feature index, in case the jobs complete out of order
+        images_ = np.array(images_)[np.argsort(feature_idx)]
+        self.images_ = images_
 
-        self.images_ = np.vstack(images_)
-
-    def _run_fit(self, i_feature, feature, dataset, images_):
+    def _run_fit(self, i_feature, feature, dataset):
         feature_ids = dataset.get_studies_by_label(
             labels=[feature],
             label_threshold=self.frequency_threshold,
@@ -224,7 +226,7 @@ def _run_fit(self, i_feature, feature, dataset):
             return_type="array",
         )
 
-        images_[i_feature] = feature_data
+        return feature_data, i_feature
 
     def transform(self, img):
         """Correlate target image with each feature-specific meta-analytic map.
@@ -313,17 +315,18 @@ def _fit(self, dataset):
         self.masker = dataset.masker
 
         n_features = len(self.features_)
-        images_ = {}
         with tqdm_joblib(tqdm(total=n_features)):
-            Parallel(n_jobs=self.n_cores)(
-                delayed(self._run_fit)(feature, dataset, images_) for feature in self.features_
+            results = Parallel(n_jobs=self.n_cores)(
+                delayed(self._run_fit)(feature, dataset) for feature in self.features_
             )
+
+        # _run_fit returns None for skipped features, so drop them before building the dict
+        images_ = dict(filter(None, results))
 
         # reduce features again
         self.features_ = [f for f in self.features_ if f in images_.keys()]
         self.images_ = images_
 
-    def _run_fit(self, feature, dataset, images_):
+    def _run_fit(self, feature, dataset):
         feature_ids = dataset.get_studies_by_label(
             labels=[feature], label_threshold=self.frequency_threshold
         )
         selected_ids = sorted(list(set(feature_ids).intersection(self.inputs_["id"])))
         selected_id_idx = [
             i_id for i_id, id_ in enumerate(self.inputs_["id"]) if id_ in selected_ids
         ]
         test_imgs = [
             img for i_img, img in enumerate(self.inputs_["images"]) if i_img in selected_id_idx
         ]
         if len(test_imgs):
             feature_arr = _safe_transform(
                 test_imgs,
                 self.masker,
                 memfile=None,
             )
-            images_[feature] = feature_arr
+            return feature, feature_arr
         else:
             LGR.info(f"Skipping feature '{feature}'. No images found.")

From 936d79619893d0ab5520bfc022e8430b95cfc569 Mon Sep 17 00:00:00 2001
From: JulioAPeraza
Date: Thu, 4 Aug 2022 14:57:54 -0400
Subject: [PATCH 07/11] Fix linter issue

---
 nimare/tests/utils.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/nimare/tests/utils.py b/nimare/tests/utils.py
index b54b09a15..429d5a9b9 100644
--- a/nimare/tests/utils.py
+++ b/nimare/tests/utils.py
@@ -23,7 +23,8 @@ def get_test_data_path():
 
 def _create_signal_mask(ground_truth_foci_ijks, mask):
-    """Create complementary binary images to identify areas of likely significance and nonsignificance.
+    """Create complementary binary images to identify areas of likely significance and
+    nonsignificance.
Parameters ---------- From 865dfd42f5d401455af98f29f06e0f02a2da2f24 Mon Sep 17 00:00:00 2001 From: JulioAPeraza Date: Thu, 4 Aug 2022 15:11:02 -0400 Subject: [PATCH 08/11] Revert "Fix linter issue" This reverts commit 936d79619893d0ab5520bfc022e8430b95cfc569. --- nimare/tests/utils.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nimare/tests/utils.py b/nimare/tests/utils.py index 429d5a9b9..b54b09a15 100644 --- a/nimare/tests/utils.py +++ b/nimare/tests/utils.py @@ -23,8 +23,7 @@ def get_test_data_path(): def _create_signal_mask(ground_truth_foci_ijks, mask): - """Create complementary binary images to identify areas of likely significance and - nonsignificance. + """Create complementary binary images to identify areas of likely significance and nonsignificance. Parameters ---------- From 79a8c9500558241f73f123377e726fdcdbb32146 Mon Sep 17 00:00:00 2001 From: JulioAPeraza Date: Fri, 5 Aug 2022 12:52:55 -0400 Subject: [PATCH 09/11] Shorten summary sentence --- nimare/tests/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nimare/tests/utils.py b/nimare/tests/utils.py index b54b09a15..940965b79 100644 --- a/nimare/tests/utils.py +++ b/nimare/tests/utils.py @@ -23,7 +23,7 @@ def get_test_data_path(): def _create_signal_mask(ground_truth_foci_ijks, mask): - """Create complementary binary images to identify areas of likely significance and nonsignificance. + """Create binary images to identify areas of likely significance and nonsignificance. Parameters ---------- From fe647cdcc2a6780ddcd68ddb1296e610d773c4d4 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Sat, 6 Aug 2022 09:13:03 -0400 Subject: [PATCH 10/11] Switch from face+edge connectivity to face-only (#733) * Switch from face+edge connectivity to face-only. * Add a note about connectivity structure. * Remove note title. * Update cbma.rst --- docs/cbma.rst | 18 ++++++++++++++++++ nimare/diagnostics.py | 12 ++++++++++-- nimare/meta/cbma/base.py | 6 +++++- nimare/meta/cbma/mkda.py | 12 ++++++++++-- 4 files changed, 43 insertions(+), 5 deletions(-) diff --git a/docs/cbma.rst b/docs/cbma.rst index f497e044c..e7cc1bf11 100644 --- a/docs/cbma.rst +++ b/docs/cbma.rst @@ -139,6 +139,24 @@ The Monte Carlo FWE correction approach implemented in NiMARE produces three new **Voxel-level correction is generally more conservative than cluster-level correction, so it is only recommended for very large meta-analyses (i.e., hundreds of studies).** +.. important:: + + Starting in version 0.0.13, clusters in the cluster-level corrected images are defined using + faces connectivity (also known as 1st nearest-neighbor, NN1, or 6 neighbor connectivity), + which counts voxels sharing a face as connected. + This is more restrictive than other connectivity structures, + including faces+edges (aka 2nd nearest-neighbor, NN2, or 18 neighbor connectivity) + and faces+edges+corners (aka 3rd nearest-neighbor, NN3, or 26 neighbor connectivity). + + Prior to version 0.0.13, clusters were defined using faces+edges connectivity. + + Different tools use different connectivity structures. + Nilearn uses faces connectivity, like NiMARE, while SPM uses faces+edges. + FSL allows users to select one of the three connectivity structures, using the ``--connectivity`` parameter. + Most AFNI programs also allow users to select a connectivity structure, + though the actual parameter differs across programs. + + .. admonition:: What about threshold-free cluster enhancement? 
TFCE :footcite:p:`smith2009threshold` is a voxel-level metric that combines signal magnitude and diff --git a/nimare/diagnostics.py b/nimare/diagnostics.py index 21bb8233e..4e3f1e560 100644 --- a/nimare/diagnostics.py +++ b/nimare/diagnostics.py @@ -28,6 +28,10 @@ class Jackknife(NiMAREBase): """Run a jackknife analysis on a meta-analysis result. + .. versionchanged:: 0.0.13 + + Change cluster neighborhood from faces+edges to faces, to match Nilearn. + .. versionadded:: 0.0.11 Parameters @@ -139,7 +143,7 @@ def transform(self, result): # Let's label the clusters in the thresholded map so we can use it as a NiftiLabelsMasker # This won't work when the Estimator's masker isn't a NiftiMasker... :( - conn = ndimage.generate_binary_structure(3, 2) + conn = ndimage.generate_binary_structure(rank=3, connectivity=1) labeled_cluster_arr, n_clusters = ndimage.label(thresh_arr, conn) labeled_cluster_img = nib.Nifti1Image( labeled_cluster_arr, @@ -236,6 +240,10 @@ def _transform( class FocusCounter(NiMAREBase): """Run a focus-count analysis on a coordinate-based meta-analysis result. + .. versionchanged:: 0.0.13 + + Change cluster neighborhood from faces+edges to faces, to match Nilearn. + .. versionadded:: 0.0.12 Parameters @@ -335,7 +343,7 @@ def transform(self, result): # Let's label the clusters in the thresholded map so we can use it as a NiftiLabelsMasker # This won't work when the Estimator's masker isn't a NiftiMasker... :( - conn = ndimage.generate_binary_structure(3, 2) + conn = ndimage.generate_binary_structure(rank=3, connectivity=1) labeled_cluster_arr, n_clusters = ndimage.label(thresh_arr, conn) labeled_cluster_img = nib.Nifti1Image( labeled_cluster_arr, diff --git a/nimare/meta/cbma/base.py b/nimare/meta/cbma/base.py index e1d1cff4a..484f05bb4 100644 --- a/nimare/meta/cbma/base.py +++ b/nimare/meta/cbma/base.py @@ -616,6 +616,10 @@ def correct_fwe_montecarlo( Only call this method from within a Corrector. + .. versionchanged:: 0.0.13 + + Change cluster neighborhood from faces+edges to faces, to match Nilearn. + .. versionchanged:: 0.0.12 * Fix the ``vfwe_only`` option. @@ -729,7 +733,7 @@ def correct_fwe_montecarlo( iter_df = self.inputs_["coordinates"].copy() # Define connectivity matrix for cluster labeling - conn = ndimage.generate_binary_structure(3, 2) + conn = ndimage.generate_binary_structure(rank=3, connectivity=1) with tqdm_joblib(tqdm(total=n_iters)): perm_results = Parallel(n_jobs=n_cores)( diff --git a/nimare/meta/cbma/mkda.py b/nimare/meta/cbma/mkda.py index 392c79500..57a8a9854 100644 --- a/nimare/meta/cbma/mkda.py +++ b/nimare/meta/cbma/mkda.py @@ -567,6 +567,10 @@ def _run_fwe_permutation(self, iter_xyz1, iter_xyz2, iter_df1, iter_df2, conn, v def _apply_correction(self, stat_values, voxel_thresh, vfwe_null, csfwe_null, cmfwe_null): """Apply different kinds of FWE correction to statistical value matrix. + .. versionchanged:: 0.0.13 + + Change cluster neighborhood from faces+edges to faces, to match Nilearn. 
+ Parameters ---------- stat_values : :obj:`numpy.ndarray` @@ -584,7 +588,7 @@ def _apply_correction(self, stat_values, voxel_thresh, vfwe_null, csfwe_null, cm eps = np.spacing(1) # Define connectivity matrix for cluster labeling - conn = ndimage.generate_binary_structure(3, 2) + conn = ndimage.generate_binary_structure(rank=3, connectivity=1) # Voxel-level FWE p_vfwe_values = null_to_p(np.abs(stat_values), vfwe_null, tail="upper") @@ -647,6 +651,10 @@ def correct_fwe_montecarlo(self, result, voxel_thresh=0.001, n_iters=5000, n_cor Only call this method from within a Corrector. + .. versionchanged:: 0.0.13 + + Change cluster neighborhood from faces+edges to faces, to match Nilearn. + .. versionchanged:: 0.0.12 Include cluster level-corrected results in Monte Carlo null method. @@ -767,7 +775,7 @@ def correct_fwe_montecarlo(self, result, voxel_thresh=0.001, n_iters=5000, n_cor ss_thresh = chi2.isf(voxel_thresh, 1) # Define connectivity matrix for cluster labeling - conn = ndimage.generate_binary_structure(3, 2) + conn = ndimage.generate_binary_structure(rank=3, connectivity=1) with tqdm_joblib(tqdm(total=n_iters)): perm_results = Parallel(n_jobs=n_cores)( From 6388514b90bc3218694e9022efceaf0a481bef71 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Sat, 6 Aug 2022 11:35:36 -0400 Subject: [PATCH 11/11] Remove conperm and scale CLI workflows (#740) * Remove the unimplemented SCALE workflow. * Remove the conperm workflow. Users should just use Nilearn directly when they aren't operating within a NiMARE dataset format. * Remove associated tests. * Update utils.py --- docs/api.rst | 2 - nimare/cli.py | 101 -------------------------------- nimare/tests/test_workflows.py | 80 ------------------------- nimare/tests/utils.py | 2 +- nimare/workflows/__init__.py | 9 +-- nimare/workflows/conperm.py | 88 ---------------------------- nimare/workflows/scale.py | 103 --------------------------------- 7 files changed, 2 insertions(+), 383 deletions(-) delete mode 100644 nimare/workflows/conperm.py delete mode 100644 nimare/workflows/scale.py diff --git a/docs/api.rst b/docs/api.rst index a9120444d..8920c1e60 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -308,9 +308,7 @@ For more information about fetching data from the internet, see :ref:`fetching t :template: function.rst workflows.ale_sleuth_workflow - workflows.conperm_workflow workflows.macm_workflow - workflows.scale_workflow .. _api_base_ref: diff --git a/nimare/cli.py b/nimare/cli.py index bb4fc563b..847d6e732 100644 --- a/nimare/cli.py +++ b/nimare/cli.py @@ -4,9 +4,7 @@ from nimare.io import convert_neurosynth_to_json, convert_sleuth_to_json from nimare.workflows.ale import ale_sleuth_workflow -from nimare.workflows.conperm import conperm_workflow from nimare.workflows.macm import macm_workflow -from nimare.workflows.scale import scale_workflow def _is_valid_file(parser, arg): @@ -92,52 +90,6 @@ def _get_parser(): help=("Number of processes to use for meta-analysis. If -1, use all available cores."), ) - # Contrast permutation workflow - conperm_parser = subparsers.add_parser( - "conperm", - help=( - "Meta-analysis of contrast maps using random effects and " - "two-sided inference with empirical (permutation-based) null " - "distribution and Family Wise Error multiple comparisons " - "correction. Input may be a list of 3D files or a single 4D " - "file." 
- ), - ) - conperm_parser.set_defaults(func=conperm_workflow) - conperm_parser.add_argument( - "contrast_images", - nargs="+", - metavar="FILE", - type=lambda x: _is_valid_file(parser, x), - help=("Data to analyze. May be a single 4D file or a list of 3D files."), - ) - conperm_parser.add_argument( - "--mask", - dest="mask_image", - metavar="FILE", - type=lambda x: _is_valid_file(parser, x), - help=("Mask file."), - default=None, - ) - conperm_parser.add_argument( - "--output_dir", - dest="output_dir", - metavar="PATH", - type=str, - help=("Output directory."), - default=".", - ) - conperm_parser.add_argument( - "--prefix", dest="prefix", type=str, help=("Common prefix for output maps."), default="" - ) - conperm_parser.add_argument( - "--n_iters", - dest="n_iters", - type=int, - help=("Number of iterations for permutation testing."), - default=10000, - ) - # MACM macm_parser = subparsers.add_parser( "macm", @@ -192,59 +144,6 @@ def _get_parser(): help=("Number of processes to use for meta-analysis. If -1, use all available cores."), ) - # SCALE - scale_parser = subparsers.add_parser( - "scale", - help=( - "Method for performing Specific CoActivation Likelihood " - "Estimation (SCALE), a modified meta-analytic coactivation " - "modeling (MACM) that takes activation frequency bias into " - "account, for delineating distinct core networks of " - "coactivation, using a permutation-based approach." - ), - ) - scale_parser.set_defaults(func=scale_workflow) - scale_parser.add_argument( - "dataset_file", type=lambda x: _is_valid_file(parser, x), help=("Dataset file to analyze.") - ) - scale_parser.add_argument( - "--baseline", - type=lambda x: _is_valid_file(parser, x), - help=("Voxel-wise baseline activation rates."), - ) - scale_parser.add_argument( - "--output_dir", - dest="output_dir", - metavar="PATH", - type=str, - help=("Output directory."), - default=".", - ) - scale_parser.add_argument( - "--prefix", dest="prefix", type=str, help=("Common prefix for output maps."), default="" - ) - scale_parser.add_argument( - "--n_iters", - dest="n_iters", - type=int, - help=("Number of iterations for permutation testing."), - default=2500, - ) - scale_parser.add_argument( - "--v_thr", - dest="v_thr", - type=float, - help=("Voxel p-value threshold used to create clusters."), - default=0.001, - ) - scale_parser.add_argument( - "--n_cores", - dest="n_cores", - type=int, - default=1, - help=("Number of processes to use for meta-analysis. 
If -1, use all available cores."), - ) - # Conversion workflows sleuth2nimare_parser = subparsers.add_parser( "sleuth2nimare", help=("Convert a Sleuth text file to a NiMARE json file.") diff --git a/nimare/tests/test_workflows.py b/nimare/tests/test_workflows.py index 526ec90ac..98de585a6 100644 --- a/nimare/tests/test_workflows.py +++ b/nimare/tests/test_workflows.py @@ -81,83 +81,3 @@ def test_ale_workflow_cli_smoke_2(tmp_path_factory): ] ) assert op.isfile(op.join(tmpdir, f"{prefix}_group2_input_coordinates.txt")) - - -def test_scale_workflow_function_smoke(tmp_path_factory): - """Run smoke test of the SCALE workflow as a function.""" - tmpdir = tmp_path_factory.mktemp("test_scale_workflow_function_smoke") - sleuth_file = op.join(get_test_data_path(), "test_sleuth_file.txt") - prefix = "test" - baseline = op.join(get_test_data_path(), "test_baseline.txt") - - # The same test is run with both workflow function and CLI - workflows.scale_workflow( - sleuth_file, baseline=baseline, output_dir=tmpdir, prefix=prefix, n_iters=5, n_cores=1 - ) - assert op.isfile(op.join(tmpdir, f"{prefix}_input_coordinates.txt")) - - -def test_scale_workflow_cli_smoke(tmp_path_factory): - """Run smoke test of the SCALE workflow as a CLI.""" - tmpdir = tmp_path_factory.mktemp("test_scale_workflow_cli_smoke") - sleuth_file = op.join(get_test_data_path(), "test_sleuth_file.txt") - prefix = "test" - baseline = op.join(get_test_data_path(), "test_baseline.txt") - - cli._main( - [ - "scale", - "--baseline", - baseline, - "--output_dir", - str(tmpdir), - "--prefix", - prefix, - "--n_iters", - "5", - "--n_cores", - "1", - sleuth_file, - ] - ) - assert op.isfile(op.join(tmpdir, f"{prefix}_input_coordinates.txt")) - - -def test_conperm_workflow_function_smoke(testdata_ibma, tmp_path_factory): - """Run smoke test of the contrast permutation workflow as a function.""" - tmpdir = tmp_path_factory.mktemp("test_conperm_workflow_function_smoke") - dset = testdata_ibma - files = dset.get_images(imtype="beta") - mask_image = op.join(get_test_data_path(), "test_pain_dataset", "mask.nii.gz") - prefix = "test" - - # The same test is run with both workflow function and CLI - workflows.conperm_workflow( - files, mask_image=mask_image, output_dir=tmpdir, prefix=prefix, n_iters=5 - ) - assert op.isfile(op.join(tmpdir, f"{prefix}_logp.nii.gz")) - - -def test_conperm_workflow_cli_smoke(testdata_ibma, tmp_path_factory): - """Run smoke test of the contrast permutation workflow as a CLI.""" - tmpdir = tmp_path_factory.mktemp("test_conperm_workflow_cli_smoke") - dset = testdata_ibma - files = dset.get_images(imtype="beta") - mask_image = op.join(get_test_data_path(), "test_pain_dataset", "mask.nii.gz") - prefix = "test" - - cli._main( - [ - "conperm", - "--output_dir", - str(tmpdir), - "--mask", - mask_image, - "--prefix", - prefix, - "--n_iters", - "5", - ] - + files - ) - assert op.isfile(op.join(tmpdir, f"{prefix}_logp.nii.gz")) diff --git a/nimare/tests/utils.py b/nimare/tests/utils.py index b54b09a15..940965b79 100644 --- a/nimare/tests/utils.py +++ b/nimare/tests/utils.py @@ -23,7 +23,7 @@ def get_test_data_path(): def _create_signal_mask(ground_truth_foci_ijks, mask): - """Create complementary binary images to identify areas of likely significance and nonsignificance. + """Create binary images to identify areas of likely significance and nonsignificance. 
Parameters ---------- diff --git a/nimare/workflows/__init__.py b/nimare/workflows/__init__.py index 3ce5446cd..ecc0c2ab6 100644 --- a/nimare/workflows/__init__.py +++ b/nimare/workflows/__init__.py @@ -1,13 +1,6 @@ """Common meta-analytic workflows.""" from .ale import ale_sleuth_workflow -from .conperm import conperm_workflow from .macm import macm_workflow -from .scale import scale_workflow -__all__ = [ - "ale_sleuth_workflow", - "conperm_workflow", - "macm_workflow", - "scale_workflow", -] +__all__ = ["ale_sleuth_workflow", "macm_workflow"] diff --git a/nimare/workflows/conperm.py b/nimare/workflows/conperm.py deleted file mode 100644 index 254edd742..000000000 --- a/nimare/workflows/conperm.py +++ /dev/null @@ -1,88 +0,0 @@ -"""Run a contrast permutation meta-analysis on a set of images.""" -import logging -import os -import pathlib - -import numpy as np -from nilearn.masking import apply_mask -from nilearn.mass_univariate import permuted_ols - -from nimare.results import MetaResult -from nimare.utils import get_template - -LGR = logging.getLogger(__name__) - - -def conperm_workflow(contrast_images, mask_image=None, output_dir=None, prefix="", n_iters=10000): - """Run a contrast permutation workflow.""" - from nimare import __version__ - - if mask_image is None: - target = "mni152_2mm" - mask_image = get_template(target, mask="brain") - - n_studies = len(contrast_images) - LGR.info("Loading contrast maps...") - z_data = apply_mask(contrast_images, mask_image) - - boilerplate = """ -A contrast permutation analysis was performed on a sample of {n_studies} -images with NiMARE {version} (RRID:SCR_017398; Salo et al., 2022a; Salo et al., 2022b). -A brain mask derived from the MNI 152 template (Fonov et al., 2009; Fonov et al., 2011) -was applied at 2x2x2mm resolution. The sign flipping -method used was implemented as described in Maumet & Nichols (2016), with -{n_iters} iterations used to estimate the null distribution. - -References ----------- -- Fonov, V., Evans, A. C., Botteron, K., Almli, C. R., McKinstry, R. C., - Collins, D. L., & Brain Development Cooperative Group. (2011). - Unbiased average age-appropriate atlases for pediatric studies. - Neuroimage, 54(1), 313-327. -- Fonov, V. S., Evans, A. C., McKinstry, R. C., Almli, C. R., & Collins, D. L. - (2009). Unbiased nonlinear average age-appropriate brain templates from birth - to adulthood. NeuroImage, (47), S102. -- Maumet, C., & Nichols, T. E. (2016). Minimal Data Needed for Valid & Accurate - Image-Based fMRI Meta-Analysis. https://doi.org/10.1101/048249 -- Salo et al. (2022). NiMARE: Neuroimaging Meta-Analysis Research Environment. - NeuroLibre Reproducible Preprint Server, 1(1), 7, https://doi.org/10.55458/neurolibre.00007. -- Salo, Taylor, Yarkoni, Tal, Nichols, Thomas E., Poline, Jean-Baptiste, Kent, James D., - Gorgolewski, Krzysztof J., Glerean, Enrico, Bottenhorn, Katherine L., Bilgel, Murat, - Wright, Jessey, Reeders, Puck, Kimbler, Adam, Nielson, Dylan N., Yanes, Julio A., - Pérez, Alexandre, Oudyk, Kendra M., Jarecka, Dorota, Enge, Alexander, - Peraza, Julio A., ... Laird, Angela R. (2022). neurostuff/NiMARE: {version} - ({version}). Zenodo. https://doi.org/10.5281/zenodo.6642243. - **NOTE** Please replace this with the version-specific Zenodo reference in your manuscript. 
- """ - - LGR.info("Performing meta-analysis.") - log_p_map, t_map, _ = permuted_ols( - np.ones((z_data.shape[0], 1)), - z_data, - confounding_vars=None, - model_intercept=False, # modeled by tested_vars - n_perm=n_iters, - two_sided_test=True, - random_state=42, - n_jobs=1, - verbose=0, - ) - res = {"logp": log_p_map, "t": t_map} - # The t_test function will stand in for the Estimator in the results object - res = MetaResult(permuted_ols, mask=mask_image, maps=res, tables={}) - - boilerplate = boilerplate.format( - n_studies=n_studies, - n_iters=n_iters, - version=__version__, - ) - - if output_dir is None: - output_dir = os.getcwd() - else: - pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True) - - LGR.info("Saving output maps...") - res.save_maps(output_dir=output_dir, prefix=prefix) - LGR.info("Workflow completed.") - LGR.info(boilerplate) diff --git a/nimare/workflows/scale.py b/nimare/workflows/scale.py deleted file mode 100644 index bd0909424..000000000 --- a/nimare/workflows/scale.py +++ /dev/null @@ -1,103 +0,0 @@ -"""Workflow for running a SCALE meta-analysis from a Sleuth text file.""" -import logging -import os -import pathlib -from shutil import copyfile - -import numpy as np - -from nimare.dataset import Dataset -from nimare.io import convert_sleuth_to_dataset -from nimare.meta import SCALE -from nimare.utils import vox2mm - -LGR = logging.getLogger(__name__) - - -def scale_workflow( - dataset_file, - baseline=None, - output_dir=None, - prefix=None, - n_iters=2500, - v_thr=0.001, - n_cores=1, -): - """Perform SCALE meta-analysis from Sleuth text file or NiMARE json file. - - Warnings - -------- - This method is not yet implemented. - """ - from nimare import __version__ - - if dataset_file.endswith(".json"): - dset = Dataset(dataset_file, target="mni152_2mm") - elif dataset_file.endswith(".txt"): - dset = convert_sleuth_to_dataset(dataset_file, target="mni152_2mm") - else: - dset = Dataset.load(dataset_file) - - boilerplate = """ -A specific coactivation likelihood estimation (SCALE; Langner et al., 2014) -meta-analysis was performed using NiMARE {version} -(RRID:SCR_017398; Salo et al., 2022a; Salo et al., 2022b). -The input dataset included {n} studies/experiments. - -Voxel-specific null distributions were generated using base rates from {bl} -with {n_iters} iterations. Results were thresholded at p < {thr}. - -References ----------- -- Langner, R., Rottschy, C., Laird, A. R., Fox, P. T., & Eickhoff, S. B. (2014). - Meta-analytic connectivity modeling revisited: controlling for activation base - rates. NeuroImage, 99, 559-570. -- Salo et al. (2022). NiMARE: Neuroimaging Meta-Analysis Research Environment. - NeuroLibre Reproducible Preprint Server, 1(1), 7, https://doi.org/10.55458/neurolibre.00007. -- Salo, Taylor, Yarkoni, Tal, Nichols, Thomas E., Poline, Jean-Baptiste, Kent, James D., - Gorgolewski, Krzysztof J., Glerean, Enrico, Bottenhorn, Katherine L., Bilgel, Murat, - Wright, Jessey, Reeders, Puck, Kimbler, Adam, Nielson, Dylan N., Yanes, Julio A., - Pérez, Alexandre, Oudyk, Kendra M., Jarecka, Dorota, Enge, Alexander, - Peraza, Julio A., ... Laird, Angela R. (2022). neurostuff/NiMARE: {version} - ({version}). Zenodo. https://doi.org/10.5281/zenodo.6642243. - **NOTE** Please replace this with the version-specific Zenodo reference in your manuscript. 
- """ - boilerplate = boilerplate.format( - n=len(dset.ids), - thr=v_thr, - bl=baseline if baseline else "a gray matter template", - n_iters=n_iters, - version=__version__, - ) - - # At the moment, the baseline file should be an n_coords X 3 list of matrix - # indices matching the dataset template, where the base rate for a given - # voxel is reflected by the number of times that voxel appears in the array - if not baseline: - xyz = vox2mm( - np.vstack(np.where(dset.masker.mask_img.get_fdata())).T, - dset.masker.mask_img.affine, - ) - else: - xyz = np.loadtxt(baseline) - - estimator = SCALE(xyz=xyz, n_iters=n_iters, n_cores=n_cores) - results = estimator.fit(dset) - - if output_dir is None: - output_dir = os.path.dirname(dataset_file) - else: - pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True) - - if prefix is None: - base = os.path.basename(dataset_file) - prefix, _ = os.path.splitext(base) - prefix += "_" - elif not prefix.endswith("_"): - prefix = prefix + "_" - - results.save_maps(output_dir=output_dir, prefix=prefix) - copyfile(dataset_file, os.path.join(output_dir, prefix + "input_coordinates.txt")) - - LGR.info("Workflow completed.") - LGR.info(boilerplate)