Skip to content

Commit

Permalink
Extract relevant work from PETVolume PR
Browse files Browse the repository at this point in the history
  • Loading branch information
NicolasGensollen committed Sep 30, 2022
1 parent 6189af2 commit 9db0ce9
Show file tree
Hide file tree
Showing 3 changed files with 96 additions and 32 deletions.
5 changes: 3 additions & 2 deletions clinica/pydra/engine_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,11 +106,12 @@ def caps_query(query: dict) -> dict:
}
query_dict = {}
for k, v in query.items():
query_dict[k] = {}
if k in caps_keys_available_file_reader:
query_dict[k] = caps_keys_available_file_reader[k](**v)
query_dict[k]["query"] = caps_keys_available_file_reader[k](**v)
query_dict[k]["reader"] = "file"
elif k in caps_keys_available_group_reader:
query_dict[k] = caps_keys_available_group_reader[k](**v)
query_dict[k]["query"] = caps_keys_available_group_reader[k](**v)
query_dict[k]["reader"] = "group"
return query_dict

Expand Down
45 changes: 35 additions & 10 deletions clinica/pydra/interfaces.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,21 +41,46 @@ def _list_outputs(self):
self.inputs.base_dir,
is_bids_dir=False,
)
query = {}
output_query = {}
for k, q in self.inputs.output_query.items():
reader = q.pop("reader")
reader = q["reader"]
query = q["query"]
if reader == "file":
query[k] = clinica_file_reader(
subjects,
sessions,
self.inputs.base_dir,
q,
)
if isinstance(query, list):
temp = [
clinica_file_reader(
subjects,
sessions,
self.inputs.base_dir,
sub_query,
)[0]
for sub_query in query
]
if len(temp) != len(subjects) and len(temp[0]) == len(subjects):
transpose = []
for x in zip(*temp):
transpose.append(x)
assert len(transpose) == len(subjects)
temp = transpose
output_query[k] = temp
else:
output_query[k] = clinica_file_reader(
subjects,
sessions,
self.inputs.base_dir,
query,
)[0]
elif reader == "group":
query[k] = clinica_group_reader(self.inputs.base_dir, q)
if isinstance(query, list):
output_query[k] = [
clinica_group_reader(self.inputs.base_dir, sub_query)
for sub_query in query
]
else:
output_query[k] = clinica_group_reader(self.inputs.base_dir, query)
else:
raise ValueError(f"Unknown reader {reader}.")
return query
return output_query

def _add_output_traits(self, base):
    """Declare one dynamic output trait per key of the output query."""
    trait_names = [key for key in self.inputs.output_query]
    return add_traits(base, trait_names)
Expand Down
78 changes: 58 additions & 20 deletions clinica/utils/input_files.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,11 @@
These dictionaries describe files to grab.
"""

import functools
from collections.abc import Iterable

import numpy as np

""" T1w """

# BIDS
Expand Down Expand Up @@ -194,12 +199,54 @@
# T1-Volume


def aggregator(func):
    """Aggregate calls to *func* over iterable arguments.

    If the decorated function receives iterable arguments, the wrapper
    calls ``func`` once per element and collects the results in a list.
    All iterable arguments must have the same length. Non-iterable
    arguments — and strings/bytes, which are treated as scalar values
    rather than sequences of characters — are repeated for every call.

    Parameters
    ----------
    func : callable
        The function to decorate.

    Returns
    -------
    callable
        The wrapping function. It returns ``func``'s plain result when no
        argument is aggregable, and a list of results otherwise.

    Raises
    ------
    ValueError
        If the iterable arguments do not all have the same length.
    """

    def _is_aggregable(value):
        # Strings are technically iterable but represent single values
        # (e.g. a group label), so broadcast them instead of iterating chars.
        return isinstance(value, Iterable) and not isinstance(value, (str, bytes))

    @functools.wraps(func)
    def wrapper_aggregator(*args, **kwargs):
        # Lengths of every aggregable argument, positional and keyword.
        arg_sizes = [len(arg) for arg in args if _is_aggregable(arg)]
        arg_sizes += [len(arg) for arg in kwargs.values() if _is_aggregable(arg)]
        if len(set(arg_sizes)) > 1:
            raise ValueError(
                f"Arguments must have the same length, got lengths {arg_sizes}."
            )
        if not arg_sizes:
            # No aggregable argument: plain call, no aggregation.
            return func(*args, **kwargs)
        arg_size = arg_sizes[0]
        # Broadcast scalar positional arguments to arg_size repetitions.
        new_args = [
            arg if _is_aggregable(arg) else (arg,) * arg_size for arg in args
        ]
        # Build one keyword dict per call, broadcasting scalars likewise.
        new_kwargs = [
            {
                key: (value[i] if _is_aggregable(value) else value)
                for key, value in kwargs.items()
            }
            for i in range(arg_size)
        ]
        if not new_args:
            return [func(**kw) for kw in new_kwargs]
        return [func(*a, **kw) for a, kw in zip(zip(*new_args), new_kwargs)]

    return wrapper_aggregator


@aggregator
def t1_volume_native_tpm(tissue_number):
import os

from .spm import INDEX_TISSUE_MAP

information = {
return {
"pattern": os.path.join(
"t1",
"spm",
Expand All @@ -210,15 +257,15 @@ def t1_volume_native_tpm(tissue_number):
"description": f"Tissue probability map {INDEX_TISSUE_MAP[tissue_number]} in native space",
"needed_pipeline": "t1-volume-tissue-segmentation",
}
return information


@aggregator
def t1_volume_dartel_input_tissue(tissue_number):
import os

from .spm import INDEX_TISSUE_MAP

information = {
return {
"pattern": os.path.join(
"t1",
"spm",
Expand All @@ -229,21 +276,18 @@ def t1_volume_dartel_input_tissue(tissue_number):
"description": f"Dartel input for tissue probability map {INDEX_TISSUE_MAP[tissue_number]} from T1w MRI",
"needed_pipeline": "t1-volume-tissue-segmentation",
}
return information


@aggregator
def t1_volume_native_tpm_in_mni(tissue_number, modulation):
import os

from .spm import INDEX_TISSUE_MAP

if modulation:
pattern_modulation = "on"
description_modulation = "with"
else:
pattern_modulation = "off"
description_modulation = "without"
information = {
pattern_modulation = "on" if modulation else "off"
description_modulation = "with" if modulation else "without"

return {
"pattern": os.path.join(
"t1",
"spm",
Expand All @@ -257,21 +301,16 @@ def t1_volume_native_tpm_in_mni(tissue_number, modulation):
),
"needed_pipeline": "t1-volume-tissue-segmentation",
}
return information


def t1_volume_template_tpm_in_mni(group_label, tissue_number, modulation):
import os

from .spm import INDEX_TISSUE_MAP

if modulation:
pattern_modulation = "on"
description_modulation = "with"
else:
pattern_modulation = "off"
description_modulation = "without"
information = {
pattern_modulation = "on" if modulation else "off"
description_modulation = "with" if modulation else "without"
return {
"pattern": os.path.join(
"t1",
"spm",
Expand All @@ -285,7 +324,6 @@ def t1_volume_template_tpm_in_mni(group_label, tissue_number, modulation):
),
"needed_pipeline": "t1-volume",
}
return information


def t1_volume_deformation_to_template(group_label):
Expand Down

0 comments on commit 9db0ce9

Please sign in to comment.