diff --git a/docs/building_decision_trees.rst b/docs/building_decision_trees.rst index bcfe913aa..acc28d806 100644 --- a/docs/building_decision_trees.rst +++ b/docs/building_decision_trees.rst @@ -288,9 +288,8 @@ tree function: - ``custom_node_label``: A brief label for what happens in this node that can be used in a decision tree summary table or flow chart. If custom_node_label is not defined, then each function has default descriptive text. -- ``log_extra_report``, ``log_extra_info``: Text for each function call is automatically placed - in the logger output. In addition to that text, the text in these these strings will - also be included in the logger with the report or info codes respectively. These +- ``log_extra_info``: Text for each function call is automatically placed + in the logger output with the info label. These might be useful to give a narrative explanation of why a step was parameterized a certain way. - ``only_used_metrics``: If true, this function will only return the names of the component @@ -351,8 +350,8 @@ that should be used instead. Calculation nodes should check if the value they are calculating was already calculated and output a warning if the function overwrites an existing value -Code that adds the text ``log_extra_info`` and ``log_extra_report`` into the appropriate -logs (if they are provided by the user) +Code that adds the text ``log_extra_info`` into the output +log (if it is provided by the user) After the above information is included, all functions will call :func:`~tedana.selection.selection_utils.selectcomps2use`, diff --git a/pyproject.toml b/pyproject.toml index 4f4d3d703..20ededd62 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -27,7 +27,7 @@ dependencies = [ "nibabel>=2.5.1,<=5.2.0", "nilearn>=0.7,<=0.10.3", "numpy>=1.16,<=1.26.4", - "pandas>=2.0,<=2.2.0", + "pandas>=2.0,<=2.2.1", "pybtex", "pybtex-apa-style", "robustica>=0.1.3", diff --git a/tedana/docs.py b/tedana/docs.py index 4165a590c..aa3a8da4e 100644 --- a/tedana/docs.py +++ b/tedana/docs.py @@ -39,13 +39,6 @@ components. """ -docdict[ - "log_extra_report" -] = """ -log_extra_report : :obj:`str` - Additional text to the report log. Default="". -""" - docdict[ "log_extra_info" ] = """ diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json index 716fdc311..308dc826c 100644 --- a/tedana/resources/decision_trees/kundu.json +++ b/tedana/resources/decision_trees/kundu.json @@ -1,7 +1,7 @@ { "tree_id": "kundu_MEICA27_decision_tree", - "info": "Following the full decision tree designed by Prantik Kundu", - "report": "This is based on the criteria of the MEICA v2.5 decision tree \\citep{kundu2013integrated}.", + "info": "Following a decision tree close to the one designed by Prantik Kundu", + "report": "This is the kundu tree \\citep{tedana_decision_trees} based on the criteria of the MEICA v2.5 decision tree \\citep{kundu2013integrated}. For a description of the decision tree steps, with the rationale for each step, see \\citep{olafsson2015enhanced}.", "necessary_metrics": [ "kappa", "rho", @@ -30,6 +30,7 @@ "Accept borderline", "No provisional accept" ], + "_comment": "More information on the kundu decision tree and how it differs from other options is at https://tedana.readthedocs.io/en/stable/included_decision_trees.html. 
Descriptions of the metrics used are in desc-tedana_metrics.json, which is output when this tree is run", "nodes": [ { "functionname": "manual_classify", @@ -38,10 +39,11 @@ "decide_comps": "all" }, "kwargs": { - "log_extra_report": "", "clear_classification_tags": true, - "dont_warn_reclassify": true - } + "dont_warn_reclassify": true, + "log_extra_info": "" + }, + "_comment": "All components are initially labeled as 'unclassified'." }, { "functionname": "dec_left_op_right", @@ -54,9 +56,10 @@ "right": "kappa" }, "kwargs": { - "tag_if_true": "Unlikely BOLD" + "tag_if_true": "Unlikely BOLD", + "log_extra_info": "" }, - "_comment": "Code I002 in premodularized tedana" + "_comment": "The first four steps are for rejecting components that are very unlikely to have substantial T2* signal. Any components with rho greater than kappa are rejected (Code I002 in premodularized tedana). Higher rho than kappa means that the component better fits the TE-independence (S0) model than the TE-dependence (T2*) model." }, { "functionname": "dec_left_op_right", @@ -72,9 +75,10 @@ "left2": "countsigFT2", "op2": ">", "right2": 0, - "tag_if_true": "Unlikely BOLD" + "tag_if_true": "Unlikely BOLD", + "log_extra_info": "" }, - "_comment": "Code I003 in premodularized tedana" + "_comment": "Any components with more voxels that are significant based on the S0 model's F-statistics than the T2* model's are rejected, as long as there is at least one significant voxel for the T2* model (Code I003 in premodularized tedana)" }, { "functionname": "calc_median", @@ -82,7 +86,11 @@ "decide_comps": "all", "metric_name": "variance explained", "median_label": "varex" - } + }, + "kwargs": { + "log_extra_info": "" + }, + "_comment": "The median variance explained is calculated across all components, for use in later steps." }, { "functionname": "dec_left_op_right", @@ -98,9 +106,10 @@ "left2": "variance explained", "op2": ">", "right2": "median_varex", - "tag_if_true": "Unlikely BOLD" + "tag_if_true": "Unlikely BOLD", + "log_extra_info": "" }, - "_comment": "Code I004 in premodularized tedana" + "_comment": "Any components with a higher Dice similarity index between the S0 model beta map and the F-statistic map than between the T2 model beta map and the F-statistic map, and with greater than median variance explained, are rejected. In slightly plainer English, this step rejects any high-variance components where significant voxels in the F-stat map overlap more with highly S0-associated voxels than T2*-associated voxels. (Code I004 in premodularized tedana)" }, { "functionname": "dec_left_op_right", @@ -116,9 +125,10 @@ "left2": "variance explained", "op2": ">", "right2": "median_varex", - "tag_if_true": "Unlikely BOLD" + "tag_if_true": "Unlikely BOLD", + "log_extra_info": "" }, - "_comment": "Code I005 in premodularized tedana" + "_comment": "Any components with a negative t-statistic comparing the distribution of T2* model F-statistics from voxels in clusters to those of voxels not in clusters, and variance explained greater than the median, are rejected. That is, reject any high-variance components exhibiting more 'speckled' T2*-associated voxels than 'clustered' ones. (Code I005 in premodularized tedana)" }, { "functionname": "calc_kappa_elbow", @@ -126,10 +136,9 @@ "decide_comps": "all" }, "kwargs": { - "log_extra_info": "", - "log_extra_report": "" + "log_extra_info": "" }, - "_comment": "" + "_comment": "The kappa elbow is calculated from all components, for use in later steps." 
}, { "functionname": "dec_reclassify_high_var_comps", @@ -138,10 +147,9 @@ "new_classification": "unclass_highvar" }, "kwargs": { - "log_extra_info": "", - "log_extra_report": "" + "log_extra_info": "" }, - "_comment": "" + "_comment": "Unclassified components exhibiting a large step down in variance explained are classified as 'unclassified high-variance' and excluded or partially excluded from several steps below." }, { "functionname": "calc_rho_elbow", @@ -151,10 +159,9 @@ "kwargs": { "subset_decide_comps": "unclassified", "rho_elbow_type": "kundu", - "log_extra_info": "", - "log_extra_report": "" + "log_extra_info": "" }, - "_comment": "" + "_comment": "This step determines the 'rho elbow' based on the rho values for all of the components, as well as just the unclassified components (excluding unclass_highvar). It calculates the elbow for each set of components, as well as the F-statistic threshold associated with p < 0.05 given the number of echoes, and then takes the mean of the three values." }, { "functionname": "dec_left_op_right", @@ -167,8 +174,9 @@ "right": "kappa_elbow_kundu" }, "kwargs": { - "log_extra_report": "" - } + "log_extra_info": "" + }, + "_comment": "Any unclassified components with kappa greater than or equal to the kappa elbow are provisionally accepted." }, { "functionname": "dec_left_op_right", @@ -183,8 +191,9 @@ "right": "rho_elbow_kundu" }, "kwargs": { - "log_extra_report": "" - } + "log_extra_info": "" + }, + "_comment": "Any provisionally accepted components with rho greater than the rho elbow are reset to 'unclassified'." }, { "functionname": "dec_classification_doesnt_exist", @@ -200,8 +209,7 @@ "kwargs": { "at_least_num_exist": 2, "tag": "No provisional accept", - "log_extra_info": "If nothing is provisionally accepted by this point, then rerun ICA & selection. If max iterations of rerunning done, then accept everything not already rejected", - "log_extra_report": "" + "log_extra_info": "If nothing is provisionally accepted by this point, then rerun ICA & selection. If max iterations of rerunning done, then accept everything not already rejected" }, "_comment": "Code I006 in premodularized tedana" }, @@ -212,7 +220,10 @@ "thresh_label": "upper", "percentile_thresh": 90 }, - "kwargs": {} + "kwargs": { + "log_extra_info": "" + }, + "_comment": "The variance explained upper threshold is calculated as the 90th percentile of variance explained from provisionally accepted components." }, { "functionname": "calc_varex_thresh", @@ -221,29 +232,38 @@ "thresh_label": "lower", "percentile_thresh": 25 }, - "kwargs": {} + "kwargs": { + "log_extra_info": "" + }, + "_comment": "The variance explained lower threshold is calculated as the 25th percentile of variance explained from provisionally accepted components." }, { "functionname": "calc_extend_factor", "parameters": {}, - "kwargs": {}, - "_comment": "This is a scaling number that is used for a few thresholds. 2 if fewer than 90 fMRI volumes, 3 if more than 110 and linear in-between" + "kwargs": { + "log_extra_info": "" + }, + "_comment": "'extend factor' is a scaling number that is used for a few thresholds. 2 if fewer than 90 fMRI volumes, 3 if more than 110 and linear in-between. In the original MEICA, this was discrete with no linear slope between 90 & 110 so this might result in a small difference in results from MEICA for runs with 91-109 volumes." 
}, { "functionname": "calc_max_good_meanmetricrank", "parameters": { "decide_comps": "provisionalaccept" }, - "kwargs": {}, - "_comment": "Number of provisionalaccept components * extend_factor" + "kwargs": { + "log_extra_info": "" + }, + "_comment": "'max_good_meanmetricrank' is the number of provisionalaccept components * extend_factor" }, { "functionname": "calc_varex_kappa_ratio", "parameters": { "decide_comps": "provisionalaccept" }, - "kwargs": {}, - "_comment": "This is used to calculate the new 'varex kappa ratio' column in the component_table" + "kwargs": { + "log_extra_info": "" + }, + "_comment": "'varex kappa ratio' is a new column in the component table. It's calcualted from the provisionally accepted components and is the maximum kappa minus the minimum kappa, divided by the maximum variance explained minus the minimum variance explained." }, { "functionname": "dec_left_op_right", @@ -264,10 +284,10 @@ "left2": "variance explained", "right2": "varex_upper_thresh", "right2_scale": "extend_factor", - "log_extra_info": "If variance and d_table_scores are high, then reject", - "tag_if_true": "Less likely BOLD" + "tag_if_true": "Less likely BOLD", + "log_extra_info": "If variance and d_table_scores are high, then reject" }, - "_comment": "Code I007 in premodularized tedana. One of several steps that makes it more likely to reject high variance components" + "_comment": "One of several steps that makes it more likely to reject high variance components. Any provisionally accepted, unclassified, or unclassified high-variance components with a decision table score greater than 'max_good_meanmetricrank' and variance explained greater than the variance explained upper threshold multiplied by the extend factor are rejected. (Code I007 in premodularized tedana.)" }, { "functionname": "dec_left_op_right", @@ -293,7 +313,7 @@ "right3": "kappa_elbow_kundu", "log_extra_info": "If low variance, accept even if bad kappa & d_table_scores" }, - "_comment": "Code I008 in premodularized tedana" + "_comment": "Any provisionally accepted, unclassified, or unclassified high-variance components with a decision table score greater than 'max_good_meanmetricrank', variance explained less than or equal to the variance explained lower threshold, and kappa less than or equal to the kappa elbow will be accepted and labeled as 'low variance'. (Code I008 in premodularized tedana)" }, { "functionname": "dec_classification_doesnt_exist", @@ -311,10 +331,9 @@ }, "kwargs": { "tag": "Likely BOLD", - "log_extra_info": "If nothing left is unclassified, then accept all", - "log_extra_report": "" + "log_extra_info": "If nothing left is unclassified, then accept all" }, - "_comment": "No code in premodularized tedana" + "_comment": "If no components are still labeled as unclassified or unclassified high-variance, then all remaining provisionally accepted components are accepted." }, { "functionname": "calc_revised_meanmetricrank_guesses", @@ -325,8 +344,10 @@ "unclass_highvar" ] }, - "kwargs": {}, - "_comment": "Add more here" + "kwargs": { + "log_extra_info": "" + }, + "_comment": "If any components are still labeled as unclassified or unclassified high-variance, then a revised decision table score is calculated from the provisionally accepted, unclassified, and unclassified high-variance components." 
}, { "functionname": "dec_left_op_right", @@ -354,7 +375,7 @@ "right3_scale": "extend_factor", "log_extra_info": "Reject if a combination of kappa, variance, and other factors are ranked worse than others" }, - "_comment": "Code I009 in premodularized tedana. Quirky combination 1 of a bunch of metrics that deal with rejecting some edge cases" + "_comment": "A quirky combination of a bunch of metrics that deal with rejecting some edge cases. Any provisionally accepted, unclassified, or unclassified high-variance components with a revised decision tree score greater than the 'conservative_guess', variance explained-kappa ratio greater than the extend factor times two, and variance explained greater than the variance explained upper threshold times the extend factor are rejected. (Code I009 in premodularized tedana)" }, { "functionname": "dec_left_op_right", @@ -379,7 +400,7 @@ "right2_scale": "extend_factor", "log_extra_info": "Reject if a combination of variance and ranks of other metrics are worse than others" }, - "_comment": "Code I010 in premodularized tedana. Quirky combination 2 of a bunch of metrics that deal with rejecting some edge cases" + "_comment": "A quirky combination of a bunch of metrics that deal with rejecting some edge cases. Any provisionally accepted, unclassified, or unclassified high-variance components with a revised decision table score greater than 'num_acc_guess' times 0.9 and variance explained greater than variance explained lower threshold times the extend factor are rejected. (Code I010 in premodularized tedana)" }, { "functionname": "calc_varex_thresh", @@ -393,8 +414,10 @@ "percentile_thresh": 25 }, "kwargs": { - "num_highest_var_comps": "num_acc_guess" - } + "num_highest_var_comps": "num_acc_guess", + "log_extra_info": "" + }, + "_comment": "An updated variance explained lower threshold (25th percentile) is calculated from the 'num_acc_guess' highest variance explained components among the remaining provisionally accepted, unclassified, and unclassified high-variance components." }, { "functionname": "dec_left_op_right", @@ -417,7 +440,7 @@ "right2": "varex_new_lower_thresh", "log_extra_info": "Accept components with a bad d_table_score, but are at the higher end of the remaining variance so more cautious to not remove" }, - "_comment": "Code I011 in premodularized tedana. Yet another quirky criterion, but this one to keep components. In the original tree, varex_new_lower_thresh would be lower than it is here. If there are differences in results, might be worth adding a scaling factor" + "_comment": "Another quirky criterion, but this one to keep components. Any provisionally accepted, unclassified, or unclassified high-variance components with a revised decision table score greater than 'num_acc_guess' and variance explained greater than the new variance explained lower threshold are accepted and labeled as 'borderline'. Prior to tedana vs 23.0.1 a mistake meant varex_new_lower_thresh would be lower than it is here and that might cause different results (Code I011 in premodularized tedana)." }, { "functionname": "dec_left_op_right", @@ -440,7 +463,7 @@ "right2": "varex_new_lower_thresh", "log_extra_info": "For not already rejected components, accept ones below the kappa elbow, but at the higher end of the remaining variance so more cautious to not remove" }, - "_comment": "Code I012 in premodularized tedana. Yet another quirky criterion, but this one to keep components. In the original tree, varex_new_lower_thresh might be lower than it is here. 
If there are differences in results, might be worth adding a scaling factor" + "_comment": "Another quirky criterion to keep components. Any provisionally accepted, unclassified, or unclassified high-variance components with kappa less than or equal to the kappa elbow and variance explained greater than the new variance explained lower threshold are accepted and labeled as 'borderline'. Prior to tedana version 23.0.1, a mistake meant varex_new_lower_thresh would be lower than it is here and that might cause different results. (Code I012 in premodularized tedana)" }, { "functionname": "manual_classify", @@ -453,11 +476,10 @@ ] }, "kwargs": { - "log_extra_info": "Anything still provisional (accepted or rejected) should be accepted", - "log_extra_report": "", - "tag": "Likely BOLD" + "tag": "Likely BOLD", + "log_extra_info": "Anything still provisional (accepted or rejected) should be accepted" }, - "_comment": "No code in the premodularized tedana" + "_comment": "All remaining unclassified, unclassified high-variance, or provisionally accepted components are accepted." } ] } diff --git a/tedana/resources/decision_trees/minimal.json b/tedana/resources/decision_trees/minimal.json index eb06e541c..fc65a2966 100644 --- a/tedana/resources/decision_trees/minimal.json +++ b/tedana/resources/decision_trees/minimal.json @@ -1,7 +1,7 @@ { - "tree_id": "minimal_decision_tree_test1", - "info": "Proposed minimal decision tree", - "report": "This is based on the minimal criteria of the original MEICA decision tree \\citep{kundu2013integrated} without the more aggressive noise removal steps \\citep{dupre2021te}.", + "tree_id": "minimal_decision_tree", + "info": "First version of the minimal decision tree", + "report": "The minimal decision tree \\citep{tedana_decision_trees} is a simplified version of the MEICA decision tree \\citep{kundu2013integrated,dupre2021te} that removes many of the criteria that do not rely on kappa and rho thresholds.", "necessary_metrics": [ "kappa", "rho", @@ -21,6 +21,7 @@ "Unlikely BOLD", "Low variance" ], + "_comment": "More information on the minimal decision tree and how it differs from other options is at https://tedana.readthedocs.io/en/stable/included_decision_trees.html. Descriptions of the metrics used are in desc-tedana_metrics.json, which is output when this tree is run", "nodes": [ { "functionname": "manual_classify", @@ -29,10 +30,11 @@ "decide_comps": "all" }, "kwargs": { - "log_extra_report": "", "clear_classification_tags": true, - "dont_warn_reclassify": true - } + "dont_warn_reclassify": true, + "log_extra_info": "" + }, + "_comment": "All components are initially labeled as 'unclassified'." }, { "functionname": "dec_left_op_right", @@ -45,9 +47,10 @@ "right": "kappa" }, "kwargs": { - "log_extra_report": "", - "tag_if_true": "Unlikely BOLD" - } + "tag_if_true": "Unlikely BOLD", + "log_extra_info": "" + }, + "_comment": "The first four steps are for rejecting components that are very unlikely to have substantial T2* signal. Any components with rho greater than kappa are rejected. Higher rho than kappa means that the component better fits the TE-independence (S0) model than the TE-dependence (T2*) model." 
}, { "functionname": "dec_left_op_right", @@ -63,9 +66,10 @@ "left2": "countsigFT2", "op2": ">", "right2": 0, - "log_extra_report": "", - "tag_if_true": "Unlikely BOLD" - } + "tag_if_true": "Unlikely BOLD", + "log_extra_info": "" + }, + "_comment": "Any components with more voxels that are significant based on the S0 model's F-statistics than the T2* model's are rejected, as long as there is at least one significant voxel for the T2 model." }, { "functionname": "calc_median", @@ -73,7 +77,11 @@ "decide_comps": "all", "metric_name": "variance explained", "median_label": "varex" - } + }, + "kwargs": { + "log_extra_info": "" + }, + "_comment": "The median variance explained is calculated across all components, for use in later steps." }, { "functionname": "dec_left_op_right", @@ -89,9 +97,10 @@ "left2": "variance explained", "op2": ">", "right2": "median_varex", - "log_extra_report": "", - "tag_if_true": "Unlikely BOLD" - } + "tag_if_true": "Unlikely BOLD", + "log_extra_info": "" + }, + "_comment": "Any components with higher S0 model beta map-F-statistic map Dice similarity index than T2 model beta map-F-statistic map Dice similarity index and greater than median variance explained are rejected. In slightly plainer English, this step rejects any high-variance components where significant voxels in the F-stat map overlap more with highly S0-associated voxels than T2*-associated voxels." }, { "functionname": "dec_left_op_right", @@ -107,9 +116,10 @@ "left2": "variance explained", "op2": ">", "right2": "median_varex", - "log_extra_report": "", - "tag_if_true": "Unlikely BOLD" - } + "tag_if_true": "Unlikely BOLD", + "log_extra_info": "" + }, + "_comment": "Any components with a negative t-statistic comparing the distribution of T2* model F-statistics from voxels in clusters to those of voxels not in clusters and variance explained greater than median are rejected. That is reject any high-variance components exhibiting more 'speckled' T2*-associated voxels than 'clustered' ones." }, { "functionname": "calc_kappa_elbow", @@ -117,10 +127,9 @@ "decide_comps": "all" }, "kwargs": { - "log_extra_info": "", - "log_extra_report": "" + "log_extra_info": "" }, - "_comment": "" + "_comment": "The kappa elbow is calculated from all components, for use in later steps." }, { "functionname": "calc_rho_elbow", @@ -130,10 +139,9 @@ "kwargs": { "subset_decide_comps": "unclassified", "rho_elbow_type": "liberal", - "log_extra_info": "", - "log_extra_report": "" + "log_extra_info": "" }, - "_comment": "" + "_comment": "This step determines the 'rho elbow' based on the rho values for all of the components, as well as just the unclassified components. It calculates the elbow for each set of components and then takes the maximum of the two." }, { "functionname": "dec_left_op_right", @@ -146,8 +154,9 @@ "right": "kappa_elbow_kundu" }, "kwargs": { - "log_extra_report": "" - } + "log_extra_info": "" + }, + "_comment": "Any unclassified components with kappa greater than or equal to the kappa elbow are provisionally accepted. Any remaining unclassified components are provisionally rejected. 
Nothing is left 'unclassified'" }, { "functionname": "dec_left_op_right", @@ -160,11 +169,11 @@ "right": "rho" }, "kwargs": { - "log_extra_info": "If kappa>elbow and kappa>2*rho accept even if rho>elbow", - "log_extra_report": "", "right_scale": 2, - "tag_if_true": "Likely BOLD" - } + "tag_if_true": "Likely BOLD", + "log_extra_info": "" + }, + "_comment": "Any provisionally accepted components with kappa greater than two times rho are accepted. That is, even if a component has a high rho value, if kappa above threshold and substantially higher, assume it as something work keeping and accept it" }, { "functionname": "dec_left_op_right", @@ -180,8 +189,9 @@ "right": "rho_elbow_liberal" }, "kwargs": { - "log_extra_report": "" - } + "log_extra_info": "" + }, + "_comment": "Any provisionally accepted or provisionally rejected components with rho values greater than the liberal rho elbow are provisionally rejected." }, { "functionname": "dec_variance_lessthan_thresholds", @@ -192,12 +202,12 @@ }, "kwargs": { "var_metric": "variance explained", - "log_extra_info": "", - "log_extra_report": "", "single_comp_threshold": 0.1, "all_comp_threshold": 1.0, - "tag_if_true": "Low variance" - } + "tag_if_true": "Low variance", + "log_extra_info": "" + }, + "_comment": "This step flags remaining low-variance components (less than 0.1%) and accepts up to 1% cumulative variance across these components. This is done because these components don't explain enough variance to be worth further reducing the degrees of freedom of the denoised data." }, { "functionname": "manual_classify", @@ -206,10 +216,10 @@ "decide_comps": "provisionalaccept" }, "kwargs": { - "log_extra_info": "", - "log_extra_report": "", - "tag": "Likely BOLD" - } + "tag": "Likely BOLD", + "log_extra_info": "" + }, + "_comment": "All remaining provisionally accepted components are accepted." }, { "functionname": "manual_classify", @@ -221,10 +231,10 @@ ] }, "kwargs": { - "log_extra_info": "", - "log_extra_report": "", - "tag": "Unlikely BOLD" - } + "tag": "Unlikely BOLD", + "log_extra_info": "" + }, + "_comment": "All remaining unclassified (nothing should be unclassified) or provisionally rejected components are rejected." 
} ] } diff --git a/tedana/resources/references.bib b/tedana/resources/references.bib index 786165404..cf49765e6 100644 --- a/tedana/resources/references.bib +++ b/tedana/resources/references.bib @@ -314,6 +314,26 @@ @misc{sochat2015ttoz year = 2015 } +@article{olafsson2015enhanced, + title = {Enhanced identification of BOLD-like components with multi-echo simultaneous multi-slice (MESMS) fMRI and multi-echo ICA}, + author = {Olafsson, Valur and Kundu, Prantik and Wong, Eric C and Bandettini, Peter A and Liu, Thomas T}, + journal = {Neuroimage}, + volume = {112}, + pages = {43--51}, + year = {2015}, + publisher = {Elsevier}, + url = {https://doi.org/10.1016/j.neuroimage.2015.02.052}, + doi = {10.1016/j.neuroimage.2015.02.052} +} + +@article{tedana_decision_trees, + title = {Component selection decision trees in tedana}, + author = {tedana community}, + journal = {figshare}, + year = {2024}, + doi = {10.6084/m9.figshare.25251433.v1} +} + @Article{Anglada2022, Author = {Anglada-Girotto Miquel and Miravet-Verde Samuel and Serrano Luis and Head Sarah}, Title = {robustica: customizable robust independent component analysis}, @@ -323,3 +343,4 @@ @Article{Anglada2022 doi = {10.1186/s12859-022-05043-9}, year = 2022 } + diff --git a/tedana/selection/component_selector.py b/tedana/selection/component_selector.py index 623320b50..4c671dad2 100644 --- a/tedana/selection/component_selector.py +++ b/tedana/selection/component_selector.py @@ -149,7 +149,15 @@ def validate_tree(tree): # Only if kwargs are inputted, make sure they are all valid if node.get("kwargs") is not None: - invalid_kwargs = set(node.get("kwargs").keys()) - kwargs + node_kwargs = set(node.get("kwargs").keys()) + if "log_extra_report" in node_kwargs: + LGR.warning( + f"Node {i} includes the 'log_extra_report' parameter. " + "This was removed from the code and will not be used." + ) + node_kwargs.remove("log_extra_report") + del tree["nodes"][i]["kwargs"]["log_extra_report"] + invalid_kwargs = node_kwargs - kwargs if len(invalid_kwargs) > 0: err_msg += ( f"Node {i} has additional, undefined optional parameters (kwargs): " diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index e430cf0a3..170cff14b 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -19,7 +19,6 @@ ) LGR = logging.getLogger("GENERAL") -RepLGR = logging.getLogger("REPORT") @fill_doc @@ -28,7 +27,6 @@ def manual_classify( decide_comps, new_classification, clear_classification_tags=False, - log_extra_report="", log_extra_info="", custom_node_label="", only_used_metrics=False, @@ -61,7 +59,6 @@ def manual_classify( (Useful if manual_classify is used to reset all labels to unclassified). 
Default=False %(log_extra_info)s - %(log_extra_report)s %(custom_node_label)s %(only_used_metrics)s @@ -107,8 +104,6 @@ def manual_classify( LGR.info(f"{function_name_idx}: {outputs['node_label']} ") if log_extra_info: LGR.info(f"{function_name_idx} {log_extra_info}") - if log_extra_report: - RepLGR.info(log_extra_report) comps2use = selectcomps2use(selector, decide_comps) @@ -166,7 +161,6 @@ def dec_left_op_right( right3=None, left3_scale=1, right3_scale=1, - log_extra_report="", log_extra_info="", custom_node_label="", only_used_metrics=False, @@ -207,7 +201,6 @@ def dec_left_op_right( (left_scale*)left op (right_scale*right) AND (left2_scale*)left2 op2 (right2_scale*right2) if the "3" parameters are also defined then it's the intersection of all 3 statements %(log_extra_info)s - %(log_extra_report)s %(custom_node_label)s %(only_used_metrics)s %(tag_if_true)s @@ -396,8 +389,6 @@ def operator_scale_descript(val_scale, val): LGR.info(f"{function_name_idx}: {if_true} if {outputs['node_label']}, else {if_false}") if log_extra_info: LGR.info(f"{function_name_idx} {log_extra_info}") - if log_extra_report: - RepLGR.info(log_extra_report) confirm_metrics_exist( selector.component_table, outputs["used_metrics"], function_name=function_name_idx @@ -480,7 +471,6 @@ def dec_variance_lessthan_thresholds( var_metric="variance explained", single_comp_threshold=0.1, all_comp_threshold=1.0, - log_extra_report="", log_extra_info="", custom_node_label="", only_used_metrics=False, @@ -511,7 +501,6 @@ def dec_variance_lessthan_thresholds( The number of the variance for all components less than single_comp_threshold needs to be under this threshold. Default=1.0 %(log_extra_info)s - %(log_extra_report)s %(custom_node_label)s %(only_used_metrics)s %(tag_if_true)s @@ -544,8 +533,6 @@ def dec_variance_lessthan_thresholds( LGR.info(f"{function_name_idx}: {if_true} if {outputs['node_label']}, else {if_false}") if log_extra_info: LGR.info(f"{function_name_idx} {log_extra_info}") - if log_extra_report: - RepLGR.info(log_extra_report) comps2use = selectcomps2use(selector, decide_comps) confirm_metrics_exist( @@ -605,7 +592,6 @@ def calc_median( decide_comps, metric_name, median_label, - log_extra_report="", log_extra_info="", custom_node_label="", only_used_metrics=False, @@ -622,7 +608,6 @@ def calc_median( median_label : :obj:`str` The median will be saved in "median_(median_label)" %(log_extra_info)s - %(log_extra_report)s %(custom_node_label)s %(only_used_metrics)s @@ -668,8 +653,6 @@ def calc_median( LGR.info(f"{function_name_idx}: {outputs['node_label']}") if log_extra_info: LGR.info(f"{function_name_idx} {log_extra_info}") - if log_extra_report: - RepLGR.info(log_extra_report) comps2use = selectcomps2use(selector, decide_comps) confirm_metrics_exist( @@ -698,7 +681,6 @@ def calc_median( def calc_kappa_elbow( selector, decide_comps, - log_extra_report="", log_extra_info="", custom_node_label="", only_used_metrics=False, @@ -710,7 +692,6 @@ def calc_kappa_elbow( %(selector)s %(decide_comps)s %(log_extra_info)s - %(log_extra_report)s %(custom_node_label)s %(only_used_metrics)s @@ -776,8 +757,6 @@ def calc_kappa_elbow( LGR.info(f"{function_name_idx}: {outputs['node_label']}") if log_extra_info: LGR.info(f"{function_name_idx} {log_extra_info}") - if log_extra_report: - RepLGR.info(log_extra_report) comps2use = selectcomps2use(selector, decide_comps) confirm_metrics_exist( @@ -815,7 +794,6 @@ def calc_rho_elbow( decide_comps, subset_decide_comps="unclassified", rho_elbow_type="kundu", - log_extra_report="", 
log_extra_info="", custom_node_label="", only_used_metrics=False, @@ -835,7 +813,6 @@ def calc_rho_elbow( The algorithm used to calculate the rho elbow. Current options are: 'kundu' and 'liberal'. Default='kundu'. %(log_extra_info)s - %(log_extra_report)s %(custom_node_label)s %(only_used_metrics)s @@ -855,9 +832,9 @@ def calc_rho_elbow( """ function_name_idx = f"Step {selector.current_node_idx}: calc_rho_elbow" - if rho_elbow_type == "kundu".lower(): + if rho_elbow_type == "kundu": elbow_name = "rho_elbow_kundu" - elif rho_elbow_type == "liberal".lower(): + elif rho_elbow_type == "liberal": elbow_name = "rho_elbow_liberal" else: raise ValueError( @@ -901,8 +878,6 @@ def calc_rho_elbow( LGR.info(f"{function_name_idx}: {outputs['node_label']}") if log_extra_info: LGR.info(f"{function_name_idx} {log_extra_info}") - if log_extra_report: - RepLGR.info(log_extra_report) comps2use = selectcomps2use(selector, decide_comps) confirm_metrics_exist( @@ -951,7 +926,6 @@ def dec_classification_doesnt_exist( decide_comps, class_comp_exists, at_least_num_exist=1, - log_extra_report="", log_extra_info="", custom_node_label="", only_used_metrics=False, @@ -978,7 +952,6 @@ def dec_classification_doesnt_exist( Instead of just testing whether a classification exists, test whether at least this number of components have that classification. Default=1 %(log_extra_info)s - %(log_extra_report)s %(custom_node_label)s %(only_used_metrics)s tag : :obj:`str` @@ -1032,8 +1005,6 @@ def dec_classification_doesnt_exist( LGR.info(f"{function_name_idx}: {outputs['node_label']}") if log_extra_info: LGR.info(f"{function_name_idx} {log_extra_info}") - if log_extra_report: - RepLGR.info(log_extra_report) if_true = new_classification if_false = "nochange" @@ -1083,7 +1054,6 @@ def dec_reclassify_high_var_comps( selector, new_classification, decide_comps, - log_extra_report="", log_extra_info="", custom_node_label="", only_used_metrics=False, @@ -1100,7 +1070,6 @@ def dec_reclassify_high_var_comps( in new_classification. %(decide_comps)s %(log_extra_info)s - %(log_extra_report)s %(custom_node_label)s %(only_used_metrics)s tag : :obj:`str` @@ -1147,8 +1116,6 @@ def dec_reclassify_high_var_comps( LGR.info(f"{function_name_idx}: {outputs['node_label']}") if log_extra_info: LGR.info(f"{function_name_idx} {log_extra_info}") - if log_extra_report: - RepLGR.info(log_extra_report) if_true = new_classification if_false = "nochange" @@ -1224,7 +1191,6 @@ def calc_varex_thresh( thresh_label, percentile_thresh, num_highest_var_comps=None, - log_extra_report="", log_extra_info="", custom_node_label="", only_used_metrics=False, @@ -1251,7 +1217,6 @@ def calc_varex_thresh( a parameter stored in selector.cross_component_metrics ("num_acc_guess" in original decision tree). 
Default=None %(log_extra_info)s - %(log_extra_report)s %(custom_node_label)s %(only_used_metrics)s @@ -1339,8 +1304,6 @@ def calc_varex_thresh( LGR.info(f"{function_name_idx}: {outputs['node_label']}") if log_extra_info: LGR.info(f"{function_name_idx} {log_extra_info}") - if log_extra_report: - RepLGR.info(log_extra_report) if not comps2use: log_decision_tree_step( @@ -1388,7 +1351,6 @@ def calc_varex_thresh( @fill_doc def calc_extend_factor( selector, - log_extra_report="", log_extra_info="", custom_node_label="", only_used_metrics=False, @@ -1405,7 +1367,6 @@ def calc_extend_factor( %(selector)s %(decide_comps)s %(log_extra_info)s - %(log_extra_report)s %(custom_node_label)s %(only_used_metrics)s extend_factor : :obj:`float` @@ -1442,8 +1403,6 @@ def calc_extend_factor( if log_extra_info: LGR.info(f"{function_name_idx} {log_extra_info}") - if log_extra_report: - RepLGR.info(log_extra_report) outputs["extend_factor"] = get_extend_factor( n_vols=selector.cross_component_metrics["n_vols"], extend_factor=extend_factor @@ -1463,7 +1422,6 @@ def calc_max_good_meanmetricrank( selector, decide_comps, metric_suffix=None, - log_extra_report="", log_extra_info="", custom_node_label="", only_used_metrics=False, @@ -1483,7 +1441,6 @@ def calc_max_good_meanmetricrank( If this variable is not None or "" then it will output: "max_good_meanmetricrank_[metric_suffix]". Default=None %(log_extra_info)s - %(log_extra_report)s %(custom_node_label)s %(only_used_metrics)s @@ -1532,8 +1489,6 @@ def calc_max_good_meanmetricrank( if log_extra_info: LGR.info(f"{function_name_idx} {log_extra_info}") - if log_extra_report: - RepLGR.info(log_extra_report) comps2use = selectcomps2use(selector, decide_comps) confirm_metrics_exist( @@ -1569,7 +1524,6 @@ def calc_max_good_meanmetricrank( def calc_varex_kappa_ratio( selector, decide_comps, - log_extra_report="", log_extra_info="", custom_node_label="", only_used_metrics=False, @@ -1585,7 +1539,6 @@ def calc_varex_kappa_ratio( %(selector)s %(decide_comps)s %(log_extra_info)s - %(log_extra_report)s %(custom_node_label)s %(only_used_metrics)s @@ -1637,8 +1590,6 @@ def calc_varex_kappa_ratio( if log_extra_info: LGR.info(f"{function_name_idx}: {log_extra_info}") - if log_extra_report: - RepLGR.info(log_extra_report) comps2use = selectcomps2use(selector, decide_comps) confirm_metrics_exist( @@ -1690,7 +1641,6 @@ def calc_revised_meanmetricrank_guesses( selector, decide_comps, restrict_factor=2, - log_extra_report="", log_extra_info="", custom_node_label="", only_used_metrics=False, @@ -1706,7 +1656,6 @@ def calc_revised_meanmetricrank_guesses( Default=2. 
%(log_extra_info)s - %(log_extra_report)s %(custom_node_label)s %(only_used_metrics)s @@ -1813,8 +1762,6 @@ LGR.info(f"{function_name_idx}: {outputs['node_label']}") if log_extra_info: LGR.info(f"{function_name_idx}: {log_extra_info}") - if log_extra_report: - RepLGR.info(log_extra_report) comps2use = selectcomps2use(selector, decide_comps) confirm_metrics_exist( diff --git a/tedana/selection/tedica.py b/tedana/selection/tedica.py index 75f60c96d..098099c06 100644 --- a/tedana/selection/tedica.py +++ b/tedana/selection/tedica.py @@ -57,10 +57,8 @@ def automatic_selection(component_table, n_echos, n_vols, tree="kundu"): """ LGR.info("Performing ICA component selection with Kundu decision tree v2.5") RepLGR.info( - "Next, component selection was performed to identify " - "BOLD (TE-dependent), non-BOLD (TE-independent), and " - "uncertain (low-variance) components using the Kundu " - "decision tree (v2.5) \\citep{kundu2013integrated}." + "\n\nNext, component selection was performed to identify BOLD (TE-dependent) and " + "non-BOLD (TE-independent) components using a decision tree." ) component_table["classification_tags"] = "" diff --git a/tedana/tests/test_component_selector.py b/tedana/tests/test_component_selector.py index ffc6b797d..294fed508 100644 --- a/tedana/tests/test_component_selector.py +++ b/tedana/tests/test_component_selector.py @@ -83,7 +83,6 @@ def dicts_to_test(treechoice): }, "kwargs": { "log_extra_info": "random2 if Kappa>Rho", - "log_extra_report": "", # Warning for a non-predefined classification assigned to a component "tag_if_true": "random2notpredefined", }, @@ -98,7 +97,6 @@ }, "kwargs": { "log_extra_info": "", - "log_extra_report": "", # Warning for a tag that wasn't predefined "tag": "Random2_NotPredefined", }, @@ -111,6 +109,10 @@ }, "kwargs": { "tag": "Random1", + # log_extra_report was removed from the code. + # If someone runs a tree that uses this field, rather than crash, + # it will log a warning. + "log_extra_report": "This should not be logged", }, }, ], @@ -162,9 +164,9 @@ def test_load_config_fails(): def test_load_config_succeeds(): """Tests to make sure load_config succeeds.""" - # The minimal tree should have an id of "minimal_decision_tree_test1" + # The minimal tree should have an id of "minimal_decision_tree" tree = component_selector.load_config("minimal") - assert tree["tree_id"] == "minimal_decision_tree_test1" + assert tree["tree_id"] == "minimal_decision_tree" def test_minimal(): @@ -194,9 +196,7 @@ def test_validate_tree_succeeds(): - """ - Tests to make sure validate_tree suceeds for all default - decision trees in decision trees. + """Test to make sure validate_tree succeeds for all default trees. Tested on all default trees in ./tedana/resources/decision_trees Note: If there is a tree in the default trees directory that @@ -223,20 +223,16 @@ def test_validate_tree_warnings(): - """ - Tests to make sure validate_tree triggers all warning conditions - but still succeeds. - """ + """Test to make sure validate_tree triggers all warning conditions.""" # A tree that raises all possible warnings in the validator should still be valid assert component_selector.validate_tree(dicts_to_test("valid")) def test_validate_tree_fails(): - """ - Tests to make sure validate_tree fails for invalid trees - Tests ../resources/decision_trees/invalid*.json and. 
+ """Test to make sure validate_tree fails for invalid trees. + Tests ../resources/decision_trees/invalid*.json and ./data/ComponentSelection/invalid*.json trees. """ diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index 183496525..025e527a3 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -132,12 +132,10 @@ def data_for_testing_info(test_dataset=str): def download_test_data(osf_id, test_data_path): - """ - If current data is not already available, downloads tar.gz data. - - stored at `https://osf.io/osf_id/download`. + """If current data is not already available, downloads tar.gz data. - and unpacks into `out_path`. + Data are stored at `https://osf.io/osf_id/download`. + It unpacks into `out_path`. Parameters ---------- @@ -264,6 +262,7 @@ def test_integration_five_echo(skip_integration): out_dir=out_dir, tedpca=0.95, fittype="curvefit", + tree="minimal", fixed_seed=49, tedort=True, verbose=True, diff --git a/tedana/tests/test_selection_nodes.py b/tedana/tests/test_selection_nodes.py index 4292468ac..b0a7a78c9 100644 --- a/tedana/tests/test_selection_nodes.py +++ b/tedana/tests/test_selection_nodes.py @@ -30,7 +30,6 @@ def test_manual_classify_smoke(): selector, decide_comps, new_classification, - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", tag="test tag", @@ -55,7 +54,6 @@ def test_manual_classify_smoke(): "rejected", new_classification, clear_classification_tags=True, - log_extra_report="report log", log_extra_info="info log", tag="test tag", dont_warn_reclassify=True, @@ -91,7 +89,6 @@ def test_dec_left_op_right_succeeds(): "rho", left_scale=0.9, right_scale=1.4, - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", tag_if_true="test true tag", @@ -387,7 +384,6 @@ def test_dec_variance_lessthan_thresholds_smoke(): var_metric="normalized variance explained", single_comp_threshold=0.05, all_comp_threshold=0.09, - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", tag_if_true="test true tag", @@ -430,7 +426,6 @@ def test_calc_kappa_elbow(): selector = selection_nodes.calc_kappa_elbow( selector, decide_comps, - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", ) @@ -454,7 +449,6 @@ def test_calc_kappa_elbow(): selector = selection_nodes.calc_kappa_elbow( selector, decide_comps="accepted", - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", ) @@ -507,7 +501,6 @@ def test_calc_rho_elbow(): selector = selection_nodes.calc_rho_elbow( selector, decide_comps, - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", ) @@ -534,7 +527,6 @@ def test_calc_rho_elbow(): selector, decide_comps, rho_elbow_type="liberal", - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", ) @@ -560,7 +552,6 @@ def test_calc_rho_elbow(): selector = selection_nodes.calc_rho_elbow( selector, decide_comps=["accepted", "unclassified"], - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", ) @@ -624,7 +615,6 @@ def test_calc_median_smoke(): decide_comps, metric_name="variance explained", median_label="varex", - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", ) @@ -661,7 +651,6 @@ def test_calc_median_smoke(): decide_comps, metric_name="variance explained", median_label=5, - 
log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", ) @@ -673,7 +662,6 @@ def test_calc_median_smoke(): decide_comps, metric_name=5, median_label="varex", - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", ) @@ -702,7 +690,6 @@ def test_dec_classification_doesnt_exist_smoke(): decide_comps, at_least_num_exist=1, class_comp_exists="provisional accept", - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", tag="test true tag", @@ -754,7 +741,6 @@ def test_dec_classification_doesnt_exist_smoke(): decide_comps=["unclassified", "provisional accept"], at_least_num_exist=5, class_comp_exists="provisional accept", - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", tag="test true tag", @@ -808,7 +794,6 @@ def test_dec_reclassify_high_var_comps(): selector, "unclass_highvar", decide_comps, - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", tag="test true tag", @@ -847,7 +832,6 @@ def test_calc_varex_thresh_smoke(): decide_comps, thresh_label="upper", percentile_thresh=90, - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", ) @@ -866,7 +850,6 @@ def test_calc_varex_thresh_smoke(): decide_comps, thresh_label="", percentile_thresh=90, - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", ) @@ -987,7 +970,6 @@ def test_calc_varex_thresh_smoke(): decide_comps, thresh_label="upper", percentile_thresh=90, - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", ) @@ -1033,7 +1015,6 @@ def test_calc_extend_factor_smoke(): # Standard call to this function. selector = selection_nodes.calc_extend_factor( selector, - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", ) @@ -1080,7 +1061,6 @@ def test_calc_max_good_meanmetricrank_smoke(): selector = selection_nodes.calc_max_good_meanmetricrank( selector, "provisional accept", - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", ) @@ -1161,7 +1141,6 @@ def test_calc_varex_kappa_ratio_smoke(): selector = selection_nodes.calc_varex_kappa_ratio( selector, "provisional accept", - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", ) @@ -1221,7 +1200,6 @@ def test_calc_revised_meanmetricrank_guesses_smoke(): selector = selection_nodes.calc_revised_meanmetricrank_guesses( selector, ["provisional accept", "provisional reject", "unclassified"], - log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", ) diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index b902be2b6..106387221 100644 --- a/tedana/tests/test_selection_utils.py +++ b/tedana/tests/test_selection_utils.py @@ -36,9 +36,9 @@ def sample_component_table(options=None): def sample_selector(options=None): - """ - Retrieves a sample component table and initializes - a selector using that component table and the minimal tree. + """Retrieve a sample component table and initializes a selector. + + The selector uses that component table and the minimal tree. 
options: Different strings will alter the selector 'provclass': Change the classifications to "provisional accept" for 4 components @@ -118,8 +118,8 @@ def test_selectcomps2use_fails(): def test_comptable_classification_changer_succeeds(): - """ - All conditions where comptable_classification_changer should run + """All conditions where comptable_classification_changer should run. + Note: This confirms the function runs, but not that outputs are accurate. Also tests conditions where the warning logger is used, but doesn't diff --git a/tedana/tests/test_t2smap.py b/tedana/tests/test_t2smap.py index f3c60276f..8d784c4ca 100644 --- a/tedana/tests/test_t2smap.py +++ b/tedana/tests/test_t2smap.py @@ -12,7 +12,8 @@ class TestT2smap: def test_basic_t2smap1(self): """ - A very simple test, to confirm that t2smap creates output - files. + A very simple test, to confirm that t2smap creates output files. """ data_dir = get_test_data_path() @@ -41,7 +42,8 @@ def test_basic_t2smap2(self): """ - A very simple test, to confirm that t2smap creates output - files when fitmode is set to ts. + A very simple test, to confirm that t2smap creates output files when fitmode is set to ts. """ data_dir = get_test_data_path() @@ -70,7 +72,8 @@ def test_basic_t2smap3(self): """ - A very simple test, to confirm that t2smap creates output - files when combmode is set to 'paid'. + A very simple test, to confirm that t2smap creates output files when combmode is set to 'paid'. """ data_dir = get_test_data_path() @@ -99,7 +102,8 @@ def test_basic_t2smap4(self): """ - A very simple test, to confirm that t2smap creates output - files when combmode is set to 'paid' and fitmode is set to 'ts'. + A very simple test, to confirm that t2smap creates output files when combmode is set to 'paid' and fitmode is set to 'ts'. """ data_dir = get_test_data_path() diff --git a/tedana/tests/test_utils.py b/tedana/tests/test_utils.py index 45f1b103d..9153ccee1 100644 --- a/tedana/tests/test_utils.py +++ b/tedana/tests/test_utils.py @@ -103,8 +103,8 @@ def test_make_adaptive_mask(): def test_smoke_reshape_niimg(): - """ - Ensure that reshape_niimg returns reasonable objects with random inputs - in the correct format. + """Ensure that reshape_niimg returns reasonable objects with random inputs in the correct format. Note: reshape_niimg could take in 3D or 4D array. """ @@ -123,8 +123,8 @@ def test_smoke_make_adaptive_mask(): - """ - Ensure that make_adaptive_mask returns reasonable objects with random inputs - in the correct format. + """Ensure that make_adaptive_mask returns reasonable objects with random inputs in the correct format. Note: make_adaptive_mask has optional parameters - mask and getsum. """ @@ -141,8 +141,8 @@ def test_smoke_unmask(): - """ - Ensure that unmask returns reasonable objects with random inputs - in the correct format. + """Ensure that unmask returns reasonable objects with random inputs in the correct format. Note: unmask could take in 1D or 2D or 3D arrays. """ @@ -158,8 +158,8 @@ def test_smoke_dice(): - """ - Ensure that dice returns reasonable objects with random inputs - in the correct format. + """Ensure that dice returns reasonable objects with random inputs in the correct format. Note: two arrays must be in the same length. """ @@ -171,8 +171,8 @@ def test_smoke_andb(): - """ - Ensure that andb returns reasonable objects with random inputs - in the correct format. + """Ensure that andb returns reasonable objects with random inputs in the correct format. """ arr = np.random.random((100, 10)).tolist() # 2D list of "arrays" @@ -181,8 +181,8 @@ def test_smoke_get_spectrum(): - """ - Ensure that get_spectrum returns reasonable objects with random inputs - in the correct format. + """Ensure that get_spectrum returns reasonable objects with random inputs in the correct format. """ data = np.random.random(100) @@ -194,8 +194,8 @@ def test_smoke_threshold_map(): - """ - Ensure that threshold_map returns reasonable objects with random inputs - in the correct format. + """Ensure that threshold_map returns reasonable objects with random inputs in the correct format. Note: using 3D array as img, some parameters are optional and are all tested. diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index 2333af52a..f67f7de88 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -907,7 +907,7 @@ def tedana_workflow( json.dump(derivative_metadata, fo, sort_keys=True, indent=4) RepLGR.info( - "This workflow used numpy \\citep{van2011numpy}, scipy \\citep{virtanen2020scipy}, " + "\n\nThis workflow used numpy \\citep{van2011numpy}, scipy \\citep{virtanen2020scipy}, " "pandas \\citep{mckinney2010data,reback2020pandas}, " "scikit-learn \\citep{pedregosa2011scikit}, " "nilearn, bokeh \\citep{bokehmanual}, matplotlib \\citep{Hunter2007}, " @@ -922,6 +922,8 @@ with open(repname) as fo: report = [line.rstrip() for line in fo.readlines()] report = " ".join(report) + # Double spaces reflect new paragraphs + report = report.replace("  ", "\n\n") with open(repname, "w") as fo: fo.write(report)