Merge remote-tracking branch 'upstream/main' into user-guide
tsalo committed Aug 9, 2022
2 parents 67c82c9 + a4a7d16 commit b9e31d0
Showing 37 changed files with 530 additions and 902 deletions.
2 changes: 0 additions & 2 deletions .codecov.yml
@@ -16,6 +16,4 @@ coverage:

ignore:
- 'nimare/tests/'
- 'nimare/due.py'
- 'nimare/_version.py'
- 'nimare/references.py'
3 changes: 1 addition & 2 deletions docs/api.rst
@@ -94,6 +94,7 @@ For more information about the components of coordinate-based meta-analysis in N
:toctree: generated/
:template: class.rst

diagnostics.FocusFilter
diagnostics.Jackknife
diagnostics.FocusCounter

@@ -307,9 +308,7 @@ For more information about fetching data from the internet, see :ref:`fetching t
:template: function.rst

workflows.ale_sleuth_workflow
workflows.conperm_workflow
workflows.macm_workflow
workflows.scale_workflow


.. _api_base_ref:
18 changes: 18 additions & 0 deletions docs/cbma.rst
@@ -139,6 +139,24 @@ The Monte Carlo FWE correction approach implemented in NiMARE produces three new
**Voxel-level correction is generally more conservative than cluster-level correction,
so it is only recommended for very large meta-analyses (i.e., hundreds of studies).**
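
For orientation, here is a usage sketch of the Monte Carlo FWE workflow (illustrative only;
``result`` is assumed to be a ``MetaResult`` from an already-fitted CBMA estimator)::

    from nimare.correct import FWECorrector

    # Monte Carlo FWE correction; extra keyword arguments are passed through to the
    # estimator's own correction method.
    corrector = FWECorrector(method="montecarlo", voxel_thresh=0.001, n_iters=10000, n_cores=1)
    corrected = corrector.transform(result)  # adds voxel- and cluster-level corrected maps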

.. important::

Starting in version 0.0.13, clusters in the cluster-level corrected images are defined using
faces connectivity (also known as 1st nearest-neighbor, NN1, or 6 neighbor connectivity),
which counts voxels sharing a face as connected.
This is more restrictive than other connectivity structures,
including faces+edges (aka 2nd nearest-neighbor, NN2, or 18 neighbor connectivity)
and faces+edges+corners (aka 3rd nearest-neighbor, NN3, or 26 neighbor connectivity).

Prior to version 0.0.13, clusters were defined using faces+edges connectivity.

Different tools use different connectivity structures.
Nilearn uses faces connectivity, like NiMARE, while SPM uses faces+edges.
FSL allows users to select one of the three connectivity structures, using the ``--connectivity`` parameter.
Most AFNI programs also allow users to select a connectivity structure,
though the actual parameter differs across programs.
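
The difference between the three structures is easy to demonstrate with SciPy
(a toy sketch, not NiMARE's own implementation)::

    import numpy as np
    from scipy import ndimage

    faces = ndimage.generate_binary_structure(3, 1)  # NN1: 6 neighbors
    faces_edges = ndimage.generate_binary_structure(3, 2)  # NN2: 18 neighbors
    faces_edges_corners = ndimage.generate_binary_structure(3, 3)  # NN3: 26 neighbors

    binary_map = np.random.rand(10, 10, 10) > 0.8  # toy thresholded map
    for name, structure in [("NN1", faces), ("NN2", faces_edges), ("NN3", faces_edges_corners)]:
        _, n_clusters = ndimage.label(binary_map, structure)
        print(name, n_clusters)  # looser connectivity merges clusters, so counts shrink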


.. admonition:: What about threshold-free cluster enhancement?

TFCE :footcite:p:`smith2009threshold` is a voxel-level metric that combines signal magnitude and
Expand Down
1 change: 1 addition & 0 deletions docs/conf.py
@@ -109,6 +109,7 @@
# -----------------------------------------------------------------------------
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_custom_sections = ["License"]
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
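
For context (an illustrative sketch, not part of the diff): ``napoleon_custom_sections``
registers section names that napoleon would not otherwise recognize, so NumPy-style
docstrings can carry a "License" heading:

    def example_function():
        """Do a thing.

        License
        -------
        Hypothetical text rendered by napoleon as a custom "License" section.
        """
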
2 changes: 1 addition & 1 deletion examples/01_datasets/02_download_neurosynth.py
@@ -86,7 +86,7 @@
version="1",
overwrite=False,
source="combined",
vocab="neuroquery7547",
vocab="neuroquery6308",
type="tfidf",
)
# Note that the files are saved to a new folder within "out_dir" named "neuroquery".
4 changes: 2 additions & 2 deletions examples/misc-notebooks/save_nidm_to_dset.ipynb
@@ -181,7 +181,7 @@
" binarized[binarized>0] = 1\n",
" binarized[binarized<0] = 0\n",
" binarized = binarized.astype(int)\n",
" labeled = ndimage.measurements.label(binarized, np.ones((3, 3, 3)))[0]\n",
" labeled = ndimage.label(binarized, np.ones((3, 3, 3)))[0]\n",
" clust_ids = sorted(list(np.unique(labeled)[1:]))\n",
" ijk = np.hstack([np.where(data * (labeled == c) == np.max(data * (labeled == c))) for c in clust_ids])\n",
" ijk = ijk.T\n",
@@ -259,7 +259,7 @@
" binarized[binarized>0] = 1\n",
" binarized[binarized<0] = 0\n",
" binarized = binarized.astype(int)\n",
" labeled = ndimage.measurements.label(binarized, np.ones((3, 3, 3)))[0]\n",
" labeled = ndimage.label(binarized, np.ones((3, 3, 3)))[0]\n",
" clust_ids = sorted(list(np.unique(labeled)[1:]))\n",
" \n",
" peak_vals = np.array([np.max(data * (labeled == c)) for c in clust_ids])\n",
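
For context (not part of the diff): newer SciPy releases deprecate the
``scipy.ndimage.measurements`` namespace, and the top-level function is a drop-in
replacement:

    import numpy as np
    from scipy import ndimage

    binarized = (np.random.rand(5, 5, 5) > 0.5).astype(int)
    # Old, deprecated spelling: ndimage.measurements.label(binarized, np.ones((3, 3, 3)))
    labeled, n_clusters = ndimage.label(binarized, structure=np.ones((3, 3, 3)))
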
4 changes: 0 additions & 4 deletions nimare/annotate/cogat.py
@@ -5,16 +5,13 @@
import numpy as np
import pandas as pd

from nimare import references
from nimare.annotate import utils
from nimare.due import due
from nimare.extract import download_cognitive_atlas
from nimare.utils import _uk_to_us

LGR = logging.getLogger(__name__)


@due.dcite(references.COGNITIVE_ATLAS, description="Introduces the Cognitive Atlas.")
class CogAtLemmatizer(object):
"""Replace synonyms and abbreviations with Cognitive Atlas identifiers in text.
@@ -94,7 +91,6 @@ def transform(self, text, convert_uk=True):
return text


@due.dcite(references.COGNITIVE_ATLAS, description="Introduces the Cognitive Atlas.")
def extract_cogat(text_df, id_df=None, text_column="abstract"):
"""Extract Cognitive Atlas terms and count instances using regular expressions.
7 changes: 0 additions & 7 deletions nimare/annotate/gclda.py
@@ -8,15 +8,12 @@
from nilearn._utils import load_niimg
from scipy.stats import multivariate_normal

from nimare import references
from nimare.base import NiMAREBase
from nimare.due import due
from nimare.utils import get_template

LGR = logging.getLogger(__name__)


@due.dcite(references.GCLDAMODEL)
class GCLDAModel(NiMAREBase):
"""Generate a generalized correspondence latent Dirichlet allocation (GCLDA) topic model.
@@ -717,10 +714,6 @@ def _update_regions(self):
self.topics["regions_mu"][i_topic, j_region, ...] = mu
self.topics["regions_sigma"][i_topic, j_region, ...] = sigma

@due.dcite(
references.LOG_LIKELIHOOD,
description="Describes method for computing log-likelihood used in model.",
)
def compute_log_likelihood(self, model=None, update_vectors=True):
"""Compute log-likelihood of a model object given current model.
7 changes: 0 additions & 7 deletions nimare/annotate/lda.py
@@ -2,17 +2,10 @@
import pandas as pd
from sklearn.decomposition import LatentDirichletAllocation

from nimare import references
from nimare.annotate.text import generate_counts
from nimare.base import NiMAREBase
from nimare.due import due


@due.dcite(references.LDA, description="Introduces LDA.")
@due.dcite(
references.LDAMODEL,
description="First use of LDA for automated annotation of neuroimaging literature.",
)
class LDAModel(NiMAREBase):
"""Generate a latent Dirichlet allocation (LDA) topic model.
4 changes: 2 additions & 2 deletions nimare/base.py
@@ -328,11 +328,11 @@ def fit(self, dataset, drop_invalid=True):
"""
self._collect_inputs(dataset, drop_invalid=drop_invalid)
self._preprocess_input(dataset)
maps = self._fit(dataset)
maps, tables = self._fit(dataset)

if hasattr(self, "masker") and self.masker is not None:
masker = self.masker
else:
masker = dataset.masker

return MetaResult(self, masker, maps)
return MetaResult(self, mask=masker, maps=maps, tables=tables)
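
To illustrate the new contract (a hypothetical toy estimator, not NiMARE code):
``_fit`` now returns a ``(maps, tables)`` pair, and both are forwarded to ``MetaResult``:

    import numpy as np
    import pandas as pd

    class ToyEstimator:
        """Hypothetical estimator following the new _fit contract."""

        def _fit(self, dataset):
            maps = {"z": np.zeros(1000)}  # voxelwise statistic arrays
            tables = {"clusters": pd.DataFrame(columns=["id", "peak"])}  # may be empty
            return maps, tables
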
101 changes: 0 additions & 101 deletions nimare/cli.py
@@ -4,9 +4,7 @@

from nimare.io import convert_neurosynth_to_json, convert_sleuth_to_json
from nimare.workflows.ale import ale_sleuth_workflow
from nimare.workflows.conperm import conperm_workflow
from nimare.workflows.macm import macm_workflow
from nimare.workflows.scale import scale_workflow


def _is_valid_file(parser, arg):
@@ -92,52 +90,6 @@ def _get_parser():
help=("Number of processes to use for meta-analysis. If -1, use all available cores."),
)

# Contrast permutation workflow
conperm_parser = subparsers.add_parser(
"conperm",
help=(
"Meta-analysis of contrast maps using random effects and "
"two-sided inference with empirical (permutation-based) null "
"distribution and Family Wise Error multiple comparisons "
"correction. Input may be a list of 3D files or a single 4D "
"file."
),
)
conperm_parser.set_defaults(func=conperm_workflow)
conperm_parser.add_argument(
"contrast_images",
nargs="+",
metavar="FILE",
type=lambda x: _is_valid_file(parser, x),
help=("Data to analyze. May be a single 4D file or a list of 3D files."),
)
conperm_parser.add_argument(
"--mask",
dest="mask_image",
metavar="FILE",
type=lambda x: _is_valid_file(parser, x),
help=("Mask file."),
default=None,
)
conperm_parser.add_argument(
"--output_dir",
dest="output_dir",
metavar="PATH",
type=str,
help=("Output directory."),
default=".",
)
conperm_parser.add_argument(
"--prefix", dest="prefix", type=str, help=("Common prefix for output maps."), default=""
)
conperm_parser.add_argument(
"--n_iters",
dest="n_iters",
type=int,
help=("Number of iterations for permutation testing."),
default=10000,
)

# MACM
macm_parser = subparsers.add_parser(
"macm",
@@ -192,59 +144,6 @@ def _get_parser():
help=("Number of processes to use for meta-analysis. If -1, use all available cores."),
)

# SCALE
scale_parser = subparsers.add_parser(
"scale",
help=(
"Method for performing Specific CoActivation Likelihood "
"Estimation (SCALE), a modified meta-analytic coactivation "
"modeling (MACM) that takes activation frequency bias into "
"account, for delineating distinct core networks of "
"coactivation, using a permutation-based approach."
),
)
scale_parser.set_defaults(func=scale_workflow)
scale_parser.add_argument(
"dataset_file", type=lambda x: _is_valid_file(parser, x), help=("Dataset file to analyze.")
)
scale_parser.add_argument(
"--baseline",
type=lambda x: _is_valid_file(parser, x),
help=("Voxel-wise baseline activation rates."),
)
scale_parser.add_argument(
"--output_dir",
dest="output_dir",
metavar="PATH",
type=str,
help=("Output directory."),
default=".",
)
scale_parser.add_argument(
"--prefix", dest="prefix", type=str, help=("Common prefix for output maps."), default=""
)
scale_parser.add_argument(
"--n_iters",
dest="n_iters",
type=int,
help=("Number of iterations for permutation testing."),
default=2500,
)
scale_parser.add_argument(
"--v_thr",
dest="v_thr",
type=float,
help=("Voxel p-value threshold used to create clusters."),
default=0.001,
)
scale_parser.add_argument(
"--n_cores",
dest="n_cores",
type=int,
default=1,
help=("Number of processes to use for meta-analysis. If -1, use all available cores."),
)

# Conversion workflows
sleuth2nimare_parser = subparsers.add_parser(
"sleuth2nimare", help=("Convert a Sleuth text file to a NiMARE json file.")
22 changes: 14 additions & 8 deletions nimare/correct.py
@@ -153,7 +153,7 @@ def transform(self, result):
Returns
-------
result : :obj:`~nimare.results.MetaResult`
MetaResult with new corrected maps added.
MetaResult with new corrected maps and tables added.
"""
correction_method = f"correct_{self._correction_method}_{self.method}"

@@ -172,15 +172,18 @@
"Using correction method implemented in Estimator: "
f"{est.__class__.__module__}.{est.__class__.__name__}.{correction_method}."
)
corr_maps = getattr(est, correction_method)(result, **self.parameters)
corr_maps, corr_tables = getattr(est, correction_method)(result, **self.parameters)
else:
self._collect_inputs(result)
corr_maps = self._transform(result, method=correction_method)
corr_maps, corr_tables = self._transform(result, method=correction_method)

# Update corrected map names and add them to maps dict
corr_maps = {(k + self._name_suffix): v for k, v in corr_maps.items()}
result.maps.update(corr_maps)

corr_tables = {(k + self._name_suffix): v for k, v in corr_tables.items()}
result.tables.update(corr_tables)

# Update the estimator as well, in order to retain updated null distributions
result.estimator = est

@@ -208,6 +211,9 @@ def _transform(self, result, method):
The map names must _not_ include the ``_name_suffix``, as that will be added in
``transform()`` (i.e., return "p" not "p_corr-FDR_q-0.05_method-indep").
corr_tables : :obj:`dict`
An empty dictionary meant to contain any tables (pandas DataFrames) produced by the
correction procedure.
"""
p = result.maps["p"]

@@ -217,15 +223,15 @@
p_no_nans = p[nonnan_mask]

# Call the correction method
p_corr_no_nans = getattr(self, method)(p_no_nans)
p_corr_no_nans, tables = getattr(self, method)(p_no_nans)

# Unmask the corrected p values based on the NaN mask
p_corr[nonnan_mask] = p_corr_no_nans

# Create a dictionary of the corrected results
corr_maps = {"p": p_corr}
self._generate_secondary_maps(result, corr_maps)
return corr_maps
return corr_maps, tables


class FWECorrector(Corrector):
@@ -289,7 +295,7 @@ def correct_fwe_bonferroni(self, p):
--------
nimare.stats.bonferroni
"""
return bonferroni(p)
return bonferroni(p), {}


class FDRCorrector(Corrector):
@@ -357,7 +363,7 @@ def correct_fdr_indep(self, p):
--------
pymare.stats.fdr
"""
return fdr(p, q=self.alpha, method="bh")
return fdr(p, q=self.alpha, method="bh"), {}

def correct_fdr_negcorr(self, p):
"""Perform Benjamini-Yekutieli FDR correction.
@@ -397,4 +403,4 @@ def correct_fdr_negcorr(self, p):
--------
pymare.stats.fdr
"""
return fdr(p, q=self.alpha, method="by")
return fdr(p, q=self.alpha, method="by"), {}
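
A usage sketch of the corrector API after this change (illustrative; ``result`` is assumed
to be a ``MetaResult`` from some estimator's ``fit``):

    from nimare.correct import FDRCorrector

    corrector = FDRCorrector(method="indep", alpha=0.05)
    corrected = corrector.transform(result)
    # Corrected maps (and now tables) gain the corrector's suffix,
    # e.g. "p_corr-FDR_q-0.05_method-indep".
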
2 changes: 1 addition & 1 deletion nimare/decode/base.py
@@ -77,7 +77,7 @@ def _preprocess_input(self, dataset):
if not len(features):
raise Exception("No features identified in Dataset!")
elif len(features) < n_features_orig:
LGR.info(f"Retaining {len(features)}/({n_features_orig} features.")
LGR.info(f"Retaining {len(features)}/{n_features_orig} features.")

self.features_ = features
